use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/;
use mro 'c3';
-use Carp::Clan qw/^DBIx::Class/;
-use DBI;
-use DBIx::Class::Storage::DBI::Cursor;
-use DBIx::Class::Storage::Statistics;
-use Scalar::Util();
-use List::Util();
-use Data::Dumper::Concise();
-use Sub::Name ();
-
-__PACKAGE__->mk_group_accessors('simple' =>
- qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid
- _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/
-);
+use DBIx::Class::Carp;
+use DBIx::Class::Exception;
+use Scalar::Util qw/refaddr weaken reftype blessed/;
+use List::Util qw/first/;
+use Sub::Name 'subname';
+use Context::Preserve 'preserve_context';
+use Try::Tiny;
+use overload ();
+use Data::Compare (); # no imports!!! guard against insane architecture
+use DBI::Const::GetInfoType (); # intentionally no import of the global hash
+use namespace::clean;
+
+# default cursor class, overridable in connect_info attributes
+__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
+
+__PACKAGE__->mk_group_accessors('inherited' => qw/
+ sql_limit_dialect sql_quote_char sql_name_sep
+/);
+
+__PACKAGE__->mk_group_accessors('component_class' => qw/sql_maker_class datetime_parser_type/);
+
+__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
+__PACKAGE__->datetime_parser_type('DateTime::Format::MySQL'); # historic default
+
+__PACKAGE__->sql_name_sep('.');
+
+__PACKAGE__->mk_group_accessors('simple' => qw/
+ _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
+ _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
+ _perform_autoinc_retrieval _autoinc_supplied_for_op
+/);
# the values for these accessors are picked out (and deleted) from
# the attribute hashref passed to connect_info
__PACKAGE__->mk_group_accessors('simple' => @storage_options);
-# default cursor class, overridable in connect_info attributes
-__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');
+# capability definitions, using a 2-tiered accessor system
+# The rationale is:
+#
+# A driver/user may define _use_X, which blindly, without any checks, says:
+# "(do not) use this capability" (use_dbms_capability is an "inherited"
+# type accessor)
+#
+# If _use_X is undef, _supports_X is then queried. This is a "simple" style
+# accessor, which in turn calls _determine_supports_X, and stores the return
+# in a special slot on the storage object, which is wiped every time a $dbh
+# reconnection takes place (it is not guaranteed that upon reconnection we
+# will get the same rdbms version). _determine_supports_X does not need to
+# exist on a driver, as we ->can for it before calling.
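+#
+# A sketch of what a (hypothetical) driver subclass could do with this,
+# using the insert_returning capability declared below:
+#
+#   __PACKAGE__->_use_insert_returning(0);  # hard override: never use it
+#
+#   # or let the prober decide, re-checked after every reconnect:
+#   sub _determine_supports_insert_returning {
+#     shift->_server_info->{normalized_dbms_version} >= 9 ? 1 : 0;
+#   }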
-__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/);
-__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks');
+my @capabilities = (qw/
+ insert_returning
+ insert_returning_bound
+ multicolumn_in
+
+ placeholders
+ typeless_placeholders
+
+ join_optimizer
+/);
+__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
+__PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) );
+
+# on by default, not strictly a capability (pending rewrite)
+__PACKAGE__->_use_join_optimizer (1);
+sub _determine_supports_join_optimizer { 1 };
# Each of these methods need _determine_driver called before itself
# in order to function reliably. This is a purely DRY optimization
+#
+# get_(use)_dbms_capability need to be called on the correct Storage
+# class, as _use_X may be hardcoded class-wide, and _supports_X calls
+# _determine_supports_X which obv. needs a correct driver as well
my @rdbms_specific_methods = qw/
deployment_statements
sqlt_type
+ sql_maker
build_datetime_parser
datetime_parser_type
+ txn_begin
insert
insert_bulk
update
delete
select
select_single
+ with_deferred_fk_checks
+
+ get_use_dbms_capability
+ get_dbms_capability
+
+ _server_info
+ _get_server_version
/;
for my $meth (@rdbms_specific_methods) {
my $orig = __PACKAGE__->can ($meth)
- or next;
+ or die "$meth is not a ::Storage::DBI method!";
no strict qw/refs/;
no warnings qw/redefine/;
- *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub {
- if (not $_[0]->_driver_determined) {
+ *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
+ if (
+ # only fire when invoked on an instance, a valid class-based invocation
+ # would e.g. be setting a default for an inherited accessor
+ ref $_[0]
+ and
+ ! $_[0]->_driver_determined
+ and
+ ! $_[0]->{_in_determine_driver}
+ ) {
$_[0]->_determine_driver;
- goto $_[0]->can($meth);
+
+ # This for some reason crashes and burns on perl 5.8.1
+ # IFF the method ends up throwing an exception
+ #goto $_[0]->can ($meth);
+
+ my $cref = $_[0]->can ($meth);
+ goto $cref;
}
- $orig->(@_);
+
+ goto $orig;
};
}
-
=head1 NAME
DBIx::Class::Storage::DBI - DBI storage handler
);
$schema->resultset('Book')->search({
- written_on => $schema->storage->datetime_parser(DateTime->now)
+ written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now)
});
=head1 DESCRIPTION
sub new {
my $new = shift->next::method(@_);
- $new->transaction_depth(0);
$new->_sql_maker_opts({});
- $new->{savepoints} = [];
- $new->{_in_dbh_do} = 0;
+ $new->_dbh_details({});
+ $new->{_in_do_block} = 0;
$new->{_dbh_gen} = 0;
+ # read below to see what this does
+ $new->_arm_global_destructor;
+
$new;
}
+# This is a hack to work around perl shooting stuff in random
+# order on exit(). If we do not walk the remaining storage
+# objects in an END block, there is a *small but real* chance
+# of a fork()ed child to kill the parent's shared DBI handle,
+# *before perl reaches the DESTROY in this package*
+# Yes, it is ugly and effective.
+# Additionally this registry is used by the CLONE method to
+# make sure no handles are shared between threads
+{
+ my %seek_and_destroy;
+
+ sub _arm_global_destructor {
+ weaken (
+ $seek_and_destroy{ refaddr($_[0]) } = $_[0]
+ );
+ }
+
+ END {
+ local $?; # just in case the DBI destructor changes it somehow
+
+ # destroy just the object if not native to this process
+ $_->_verify_pid for (grep
+ { defined $_ }
+ values %seek_and_destroy
+ );
+ }
+
+ sub CLONE {
+ # As per DBI's recommendation, DBIC disconnects all handles as
+ # soon as possible (DBIC will reconnect only on demand from within
+ # the thread)
+ my @instances = grep { defined $_ } values %seek_and_destroy;
+ for (@instances) {
+ $_->{_dbh_gen}++; # so that existing cursors will drop as well
+ $_->_dbh(undef);
+
+ $_->transaction_depth(0);
+ $_->savepoints([]);
+ }
+
+ # properly renumber all existing refs
+ %seek_and_destroy = ();
+ $_->_arm_global_destructor for @instances;
+ }
+}
+
+sub DESTROY {
+ my $self = shift;
+
+ # some databases spew warnings on implicit disconnect
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+ local $SIG{__WARN__} = sub {};
+ $self->_dbh(undef);
+
+ # this op is necessary, since the very last perl runtime statement
+ # triggers a global destruction shootout, and the $SIG localization
+ # may very well be destroyed before perl actually gets to do the
+ # $dbh undef
+ 1;
+}
+
+# handle pid changes correctly - do not destroy parent's connection
+sub _verify_pid {
+ my $self = shift;
+
+ my $pid = $self->_conn_pid;
+ if( defined $pid and $pid != $$ and my $dbh = $self->_dbh ) {
+ $dbh->{InactiveDestroy} = 1;
+ $self->{_dbh_gen}++;
+ $self->_dbh(undef);
+ $self->transaction_depth(0);
+ $self->savepoints([]);
+ }
+
+ return;
+}
+
=head2 connect_info
This method is normally called by L<DBIx::Class::Schema/connection>, which
=head3 DBIx::Class specific connection attributes
-In addition to the standard L<DBI|DBI/ATTRIBUTES_COMMON_TO_ALL_HANDLES>
-L<connection|DBI/Database_Handle_Attributes> attributes, DBIx::Class recognizes
+In addition to the standard L<DBI|DBI/ATTRIBUTES COMMON TO ALL HANDLES>
+L<connection|DBI/Database Handle Attributes> attributes, DBIx::Class recognizes
the following connection options. These options can be mixed in with your other
-L<DBI> connection attributes, or placed in a seperate hashref
+L<DBI> connection attributes, or placed in a separate hashref
(C<\%extra_attributes>) as shown above.
Every time C<connect_info> is invoked, any previous settings for
=item limit_dialect
-Sets the limit dialect. This is useful for JDBC-bridge among others
-where the remote SQL-dialect cannot be determined by the name of the
-driver alone. See also L<SQL::Abstract::Limit>.
+Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the
+default L</sql_limit_dialect> setting of the storage (if any). For a list
+of available limit dialects see L<DBIx::Class::SQLMaker::LimitDialects>.
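+
+For example (a sketch; C<LimitOffset> is one of the dialects listed there):
+
+  { limit_dialect => 'LimitOffset' }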
+
+=item quote_names
+
+When true, automatically sets L</quote_char> and L</name_sep> to the characters
+appropriate for your particular RDBMS. This option is preferred over specifying
+L</quote_char> directly.
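+
+For example (a sketch; DSN and credentials are placeholders):
+
+  my $schema = My::Schema->connect($dsn, $user, $pass, { quote_names => 1 });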
=item quote_char
-Specifies what characters to use to quote table and column names. If
-you use this you will want to specify L</name_sep> as well.
+Specifies what characters to use to quote table and column names.
C<quote_char> expects either a single character, in which case it
is placed on either side of the table/column name, or an arrayref of length
=item name_sep
-This only needs to be used in conjunction with C<quote_char>, and is used to
-specify the charecter that seperates elements (schemas, tables, columns) from
-each other. In most cases this is simply a C<.>.
-
-The consequences of not supplying this value is that L<SQL::Abstract>
-will assume DBIx::Class' uses of aliases to be complete column
-names. The output will look like I<"me.name"> when it should actually
-be I<"me"."name">.
+This parameter is only useful in conjunction with C<quote_char>, and is used to
+specify the character that separates elements (schemas, tables, columns) from
+each other. If unspecified it defaults to the most commonly used C<.>.
=item unsafe
'postgres',
'my_pg_password',
{ AutoCommit => 1 },
- { quote_char => q{"}, name_sep => q{.} },
+ { quote_char => q{"} },
]
);
my @args = @{ $info->{arguments} };
- $self->_dbi_connect_info([@args,
- %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]);
+ if (keys %attrs and ref $args[0] ne 'CODE') {
+ carp
+ 'You provided explicit AutoCommit => 0 in your connection_info. '
+ . 'This is almost universally a bad idea (see the footnotes of '
+ . 'DBIx::Class::Storage::DBI for more info). If you still want to '
+ . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable '
+ . 'this warning.'
+ if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK};
+
+ push @args, \%attrs if keys %attrs;
+ }
+ $self->_dbi_connect_info(\@args);
+
+ # FIXME - dirty:
+  # save the attributes in a separate accessor so they are always
+ # introspectable, even in case of a CODE $dbhmaker
+ $self->_dbic_connect_attributes (\%attrs);
return $self->_connect_info;
}
delete @attrs{@storage_opts} if @storage_opts;
my @sql_maker_opts = grep exists $attrs{$_},
- qw/limit_dialect quote_char name_sep/;
+ qw/limit_dialect quote_char name_sep quote_names/;
@{ $info{sql_maker_options} }{@sql_maker_opts} =
delete @attrs{@sql_maker_opts} if @sql_maker_opts;
return \%info;
}
-sub _default_dbi_connect_attributes {
- return {
+sub _default_dbi_connect_attributes () {
+ +{
AutoCommit => 1,
- RaiseError => 1,
PrintError => 0,
+ RaiseError => 1,
+ ShowErrorStatement => 1,
};
}
sub dbh_do {
my $self = shift;
- my $code = shift;
-
- my $dbh = $self->_get_dbh;
-
- return $self->$code($dbh, @_) if $self->{_in_dbh_do}
- || $self->{transaction_depth};
-
- local $self->{_in_dbh_do} = 1;
-
- my @result;
- my $want_array = wantarray;
+ my $run_target = shift;
- eval {
-
- if($want_array) {
- @result = $self->$code($dbh, @_);
- }
- elsif(defined $want_array) {
- $result[0] = $self->$code($dbh, @_);
- }
- else {
- $self->$code($dbh, @_);
- }
- };
-
- # ->connected might unset $@ - copy
- my $exception = $@;
- if(!$exception) { return $want_array ? @result : $result[0] }
+ # short circuit when we know there is no need for a runner
+ #
+  # FIXME - assumption may be wrong
+ # the rationale for the txn_depth check is that if this block is a part
+ # of a larger transaction, everything up to that point is screwed anyway
+ return $self->$run_target($self->_get_dbh, @_)
+ if $self->{_in_do_block} or $self->transaction_depth;
- $self->throw_exception($exception) if $self->connected;
+ my $args = \@_;
- # We were not connected - reconnect and retry, but let any
- # exception fall right through this time
- carp "Retrying $code after catching disconnected exception: $exception"
- if $ENV{DBIC_DBIRETRY_DEBUG};
- $self->_populate_dbh;
- $self->$code($self->_dbh, @_);
+ DBIx::Class::Storage::BlockRunner->new(
+ storage => $self,
+ run_code => sub { $self->$run_target ($self->_get_dbh, @$args ) },
+ wrap_txn => 0,
+ retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
+ )->run;
}
-# This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do.
-# It also informs dbh_do to bypass itself while under the direction of txn_do,
-# via $self->{_in_dbh_do} (this saves some redundant eval and errorcheck, etc)
sub txn_do {
- my $self = shift;
- my $coderef = shift;
-
- ref $coderef eq 'CODE' or $self->throw_exception
- ('$coderef must be a CODE reference');
-
- return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint;
-
- local $self->{_in_dbh_do} = 1;
-
- my @result;
- my $want_array = wantarray;
-
- my $tried = 0;
- while(1) {
- eval {
- $self->_get_dbh;
-
- $self->txn_begin;
- if($want_array) {
- @result = $coderef->(@_);
- }
- elsif(defined $want_array) {
- $result[0] = $coderef->(@_);
- }
- else {
- $coderef->(@_);
- }
- $self->txn_commit;
- };
-
- # ->connected might unset $@ - copy
- my $exception = $@;
- if(!$exception) { return $want_array ? @result : $result[0] }
-
- if($tried++ || $self->connected) {
- eval { $self->txn_rollback };
- my $rollback_exception = $@;
- if($rollback_exception) {
- my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
- $self->throw_exception($exception) # propagate nested rollback
- if $rollback_exception =~ /$exception_class/;
-
- $self->throw_exception(
- "Transaction aborted: ${exception}. "
- . "Rollback failed: ${rollback_exception}"
- );
- }
- $self->throw_exception($exception)
- }
-
- # We were not connected, and was first try - reconnect and retry
- # via the while loop
- carp "Retrying $coderef after catching disconnected exception: $exception"
- if $ENV{DBIC_DBIRETRY_DEBUG};
- $self->_populate_dbh;
- }
+ $_[0]->_get_dbh; # connects or reconnects on pid change, necessary to grab correct txn_depth
+ shift->next::method(@_);
}
=head2 disconnect
$self->_do_connection_actions(disconnect_call_ => $_) for @actions;
- $self->_dbh_rollback unless $self->_dbh_autocommit;
+ # stops the "implicit rollback on disconnect" warning
+ $self->_exec_txn_rollback unless $self->_dbh_autocommit;
+ %{ $self->_dbh->{CachedKids} } = ();
$self->_dbh->disconnect;
$self->_dbh(undef);
$self->{_dbh_gen}++;
=back
-Verifies that the the current database handle is active and ready to execute
-an SQL statement (i.e. the connection did not get stale, server is still
+Verifies that the current database handle is active and ready to execute
+an SQL statement (e.g. the connection did not get stale, server is still
answering, etc.) This method is used internally by L</dbh>.
=cut
sub _seems_connected {
my $self = shift;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+
my $dbh = $self->_dbh
or return 0;
- if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) {
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
- return 0;
- }
- else {
- $self->_verify_pid;
- return 0 if !$self->_dbh;
- }
-
return $dbh->FETCH('Active');
}
return $dbh->ping;
}
-# handle pid changes correctly
-# NOTE: assumes $self->_dbh is a valid $dbh
-sub _verify_pid {
- my ($self) = @_;
-
- return if defined $self->_conn_pid && $self->_conn_pid == $$;
-
- $self->_dbh->{InactiveDestroy} = 1;
- $self->_dbh(undef);
- $self->{_dbh_gen}++;
-
- return;
-}
-
sub ensure_connected {
my ($self) = @_;
Returns a C<$dbh> - a database handle of class L<DBI>. The returned handle
is guaranteed to be healthy by implicitly calling L</connected>, and if
necessary performing a reconnection before returning. Keep in mind that this
-is very B<expensive> on some database engines. Consider using L<dbh_do>
+is very B<expensive> on some database engines. Consider using L</dbh_do>
instead.
=cut
# this is the internal "get dbh or connect (don't check)" method
sub _get_dbh {
my $self = shift;
- $self->_verify_pid if $self->_dbh;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
$self->_populate_dbh unless $self->_dbh;
return $self->_dbh;
}
-sub _sql_maker_args {
- my ($self) = @_;
-
- return (
- bindtype=>'columns',
- array_datatypes => 1,
- limit_dialect => $self->_get_dbh,
- %{$self->_sql_maker_opts}
- );
-}
-
sub sql_maker {
my ($self) = @_;
unless ($self->_sql_maker) {
my $sql_maker_class = $self->sql_maker_class;
- $self->ensure_class_loaded ($sql_maker_class);
- $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args ));
+
+ my %opts = %{$self->_sql_maker_opts||{}};
+ my $dialect =
+ $opts{limit_dialect}
+ ||
+ $self->sql_limit_dialect
+ ||
+ do {
+ my $s_class = (ref $self) || $self;
+ carp (
+ "Your storage class ($s_class) does not set sql_limit_dialect and you "
+ . 'have not supplied an explicit limit_dialect in your connection_info. '
+ . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
+ . 'databases but can be (and often is) painfully slow. '
+ . "Please file an RT ticket against '$s_class' ."
+ );
+
+ 'GenericSubQ';
+ }
+ ;
+
+ my ($quote_char, $name_sep);
+
+ if ($opts{quote_names}) {
+ $quote_char = (delete $opts{quote_char}) || $self->sql_quote_char || do {
+ my $s_class = (ref $self) || $self;
+ carp (
+ "You requested 'quote_names' but your storage class ($s_class) does "
+ . 'not explicitly define a default sql_quote_char and you have not '
+ . 'supplied a quote_char as part of your connection_info. DBIC will '
+ .q{default to the ANSI SQL standard quote '"', which works most of }
+ . "the time. Please file an RT ticket against '$s_class'."
+ );
+
+ '"'; # RV
+ };
+
+ $name_sep = (delete $opts{name_sep}) || $self->sql_name_sep;
+ }
+
+ $self->_sql_maker($sql_maker_class->new(
+ bindtype=>'columns',
+ array_datatypes => 1,
+ limit_dialect => $dialect,
+ ($quote_char ? (quote_char => $quote_char) : ()),
+ name_sep => ($name_sep || '.'),
+ %opts,
+ ));
}
return $self->_sql_maker;
}
my @info = @{$self->_dbi_connect_info || []};
$self->_dbh(undef); # in case ->connected failed we might get sent here
+ $self->_dbh_details({}); # reset everything we know
+
$self->_dbh($self->_connect(@info));
- $self->_conn_pid($$);
- $self->_conn_tid(threads->tid) if $INC{'threads.pm'};
+ $self->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads
$self->_determine_driver;
$self->_do_connection_actions(connect_call_ => $_) for @actions;
}
+
+
+sub set_use_dbms_capability {
+ $_[0]->set_inherited ($_[1], $_[2]);
+}
+
+sub get_use_dbms_capability {
+ my ($self, $capname) = @_;
+
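+  # an explicit true/false set via the corresponding _use_X accessor wins
+  # outright; only an undef falls through to the matching _supports_X
+  # prober below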
+ my $use = $self->get_inherited ($capname);
+ return defined $use
+ ? $use
+ : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
+ ;
+}
+
+sub set_dbms_capability {
+ $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
+}
+
+sub get_dbms_capability {
+ my ($self, $capname) = @_;
+
+ my $cap = $self->_dbh_details->{capability}{$capname};
+
+ unless (defined $cap) {
+ if (my $meth = $self->can ("_determine$capname")) {
+ $cap = $self->$meth ? 1 : 0;
+ }
+ else {
+ $cap = 0;
+ }
+
+ $self->set_dbms_capability ($capname, $cap);
+ }
+
+ return $cap;
+}
+
+sub _server_info {
+ my $self = shift;
+
+ my $info;
+ unless ($info = $self->_dbh_details->{info}) {
+
+ $info = {};
+
+ my $server_version = try {
+ $self->_get_server_version
+ } catch {
+ # driver determination *may* use this codepath
+ # in which case we must rethrow
+ $self->throw_exception($_) if $self->{_in_determine_driver};
+
+      # leave $server_version undefined on failure
+ undef;
+ };
+
+ if (defined $server_version) {
+ $info->{dbms_version} = $server_version;
+
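+      # e.g. a dbms_version of '5.1.73-log' normalizes to 5.001073:
+      # up to 3 numeric parts, each packed into 3 digits, so versions
+      # can be compared as plain numbers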
+ my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
+ my @verparts = split (/\./, $numeric_version);
+ if (
+ @verparts
+ &&
+ $verparts[0] <= 999
+ ) {
+ # consider only up to 3 version parts, iff not more than 3 digits
+ my @use_parts;
+ while (@verparts && @use_parts < 3) {
+ my $p = shift @verparts;
+ last if $p > 999;
+ push @use_parts, $p;
+ }
+ push @use_parts, 0 while @use_parts < 3;
+
+ $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
+ }
+ }
+
+ $self->_dbh_details->{info} = $info;
+ }
+
+ return $info;
+}
+
+sub _get_server_version {
+ shift->_dbh_get_info('SQL_DBMS_VER');
+}
+
+sub _dbh_get_info {
+ my ($self, $info) = @_;
+
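+  # accepts either a numeric ODBC info code or its symbolic name, e.g.
+  # ->_dbh_get_info('SQL_DBMS_NAME') or, assuming the standard ODBC
+  # numbering, the equivalent ->_dbh_get_info(17)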
+ if ($info =~ /[^0-9]/) {
+ $info = $DBI::Const::GetInfoType::GetInfoType{$info};
+ $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType")
+ unless defined $info;
+ }
+
+ return $self->_get_dbh->get_info($info);
+}
+
+sub _describe_connection {
+ require DBI::Const::GetInfoReturn;
+
+ my $self = shift;
+ $self->ensure_connected;
+
+ my $res = {
+ DBIC_DSN => $self->_dbi_connect_info->[0],
+ DBI_VER => DBI->VERSION,
+ DBIC_VER => DBIx::Class->VERSION,
+ DBIC_DRIVER => ref $self,
+ };
+
+ for my $inf (
+ #keys %DBI::Const::GetInfoType::GetInfoType,
+ qw/
+ SQL_CURSOR_COMMIT_BEHAVIOR
+ SQL_CURSOR_ROLLBACK_BEHAVIOR
+ SQL_CURSOR_SENSITIVITY
+ SQL_DATA_SOURCE_NAME
+ SQL_DBMS_NAME
+ SQL_DBMS_VER
+ SQL_DEFAULT_TXN_ISOLATION
+ SQL_DM_VER
+ SQL_DRIVER_NAME
+ SQL_DRIVER_ODBC_VER
+ SQL_DRIVER_VER
+ SQL_EXPRESSIONS_IN_ORDERBY
+ SQL_GROUP_BY
+ SQL_IDENTIFIER_CASE
+ SQL_IDENTIFIER_QUOTE_CHAR
+ SQL_MAX_CATALOG_NAME_LEN
+ SQL_MAX_COLUMN_NAME_LEN
+ SQL_MAX_IDENTIFIER_LEN
+ SQL_MAX_TABLE_NAME_LEN
+ SQL_MULTIPLE_ACTIVE_TXN
+ SQL_MULT_RESULT_SETS
+ SQL_NEED_LONG_DATA_LEN
+ SQL_NON_NULLABLE_COLUMNS
+ SQL_ODBC_VER
+ SQL_QUALIFIER_NAME_SEPARATOR
+ SQL_QUOTED_IDENTIFIER_CASE
+ SQL_TXN_CAPABLE
+ SQL_TXN_ISOLATION_OPTION
+ /
+ ) {
+ my $v = $self->_dbh_get_info($inf);
+ next unless defined $v;
+
+ #my $key = sprintf( '%s(%s)', $inf, $DBI::Const::GetInfoType::GetInfoType{$inf} );
+ my $expl = DBI::Const::GetInfoReturn::Explain($inf, $v);
+ $res->{$inf} = DBI::Const::GetInfoReturn::Format($inf, $v) . ( $expl ? " ($expl)" : '' );
+ }
+
+ $res;
+}
+
sub _determine_driver {
my ($self) = @_;
if ($self->_dbh) { # we are connected
$driver = $self->_dbh->{Driver}{Name};
$started_connected = 1;
- } else {
+ }
+ else {
# if connect_info is a CODEREF, we have no choice but to connect
if (ref $self->_dbi_connect_info->[0] &&
- Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') {
+ reftype $self->_dbi_connect_info->[0] eq 'CODE') {
$self->_populate_dbh;
$driver = $self->_dbh->{Driver}{Name};
}
else {
# try to use dsn to not require being connected, the driver may still
# force a connection in _rebless to determine version
- ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i;
+ # (dsn may not be supplied at all if all we do is make a mock-schema)
+ my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || '';
+ ($driver) = $dsn =~ /dbi:([^:]+):/i;
+ $driver ||= $ENV{DBI_DRIVER};
}
}
- my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
- if ($self->load_optional_class($storage_class)) {
- mro::set_mro($storage_class, 'c3');
- bless $self, $storage_class;
- $self->_rebless();
+ if ($driver) {
+ my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
+ if ($self->load_optional_class($storage_class)) {
+ mro::set_mro($storage_class, 'c3');
+ bless $self, $storage_class;
+ $self->_rebless();
+ }
+ else {
+ $self->_warn_undetermined_driver(
+ 'This version of DBIC does not yet seem to supply a driver for '
+ . "your particular RDBMS and/or connection method ('$driver')."
+ );
+ }
+ }
+ else {
+ $self->_warn_undetermined_driver(
+ 'Unable to extract a driver name from connect info - this '
+ . 'should not have happened.'
+ );
}
}
$self->_driver_determined(1);
+ Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO;
+
$self->_init; # run driver-specific initializations
$self->_run_connection_actions
}
}
+sub _determine_connector_driver {
+ my ($self, $conn) = @_;
+
+ my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME');
+
+ if (not $dbtype) {
+ $self->_warn_undetermined_driver(
+ 'Unable to retrieve RDBMS type (SQL_DBMS_NAME) of the engine behind your '
+ . "$conn connector - this should not have happened."
+ );
+ return;
+ }
+
+ $dbtype =~ s/\W/_/gi;
+
+ my $subclass = "DBIx::Class::Storage::DBI::${conn}::${dbtype}";
+ return if $self->isa($subclass);
+
+ if ($self->load_optional_class($subclass)) {
+ bless $self, $subclass;
+ $self->_rebless;
+ }
+ else {
+ $self->_warn_undetermined_driver(
+ 'This version of DBIC does not yet seem to supply a driver for '
+ . "your particular RDBMS and/or connection method ('$conn/$dbtype')."
+ );
+ }
+}
+
+sub _warn_undetermined_driver {
+ my ($self, $msg) = @_;
+
+ require Data::Dumper::Concise;
+
+ carp_once ($msg . ' While we will attempt to continue anyway, the results '
+ . 'are likely to be underwhelming. Please upgrade DBIC, and if this message '
+ . "does not go away, file a bugreport including the following info:\n"
+ . Data::Dumper::Concise::Dumper($self->_describe_connection)
+ );
+}
+
sub _do_connection_actions {
my $self = shift;
my $method_prefix = shift;
my $attrs = shift @do_args;
my @bind = map { [ undef, $_ ] } @do_args;
- $self->_query_start($sql, @bind);
- $self->_get_dbh->do($sql, $attrs, @do_args);
- $self->_query_end($sql, @bind);
+ $self->dbh_do(sub {
+ $_[0]->_query_start($sql, \@bind);
+ $_[1]->do($sql, $attrs, @do_args);
+ $_[0]->_query_end($sql, \@bind);
+ });
}
return $self;
my ($old_connect_via, $dbh);
- if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
- $old_connect_via = $DBI::connect_via;
- $DBI::connect_via = 'connect';
- }
+ local $DBI::connect_via = 'connect' if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL};
- eval {
+ try {
if(ref $info[0] eq 'CODE') {
- $dbh = $info[0]->();
+ $dbh = $info[0]->();
}
else {
- $dbh = DBI->connect(@info);
+ require DBI;
+ $dbh = DBI->connect(@info);
+ }
+
+ if (!$dbh) {
+ die $DBI::errstr;
}
- if($dbh && !$self->unsafe) {
- my $weak_self = $self;
- Scalar::Util::weaken($weak_self);
- $dbh->{HandleError} = sub {
+ unless ($self->unsafe) {
+
+ $self->throw_exception(
+ 'Refusing clobbering of {HandleError} installed on externally supplied '
+ ."DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute."
+ ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__';
+
+ # Default via _default_dbi_connect_attributes is 1, hence it was an explicit
+ # request, or an external handle. Complain and set anyway
+ unless ($dbh->{RaiseError}) {
+ carp( ref $info[0] eq 'CODE'
+
+ ? "The 'RaiseError' of the externally supplied DBI handle is set to false. "
+ ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect "
+ .'attribute has been supplied'
+
+ : 'RaiseError => 0 supplied in your connection_info, without an explicit '
+ .'unsafe => 1. Toggling RaiseError back to true'
+ );
+
+ $dbh->{RaiseError} = 1;
+ }
+
+ # this odd anonymous coderef dereference is in fact really
+ # necessary to avoid the unwanted effect described in perl5
+ # RT#75792
+ sub {
+ my $weak_self = $_[0];
+ weaken $weak_self;
+
+ # the coderef is blessed so we can distinguish it from externally
+ # supplied handles (which must be preserved)
+ $_[1]->{HandleError} = bless sub {
if ($weak_self) {
$weak_self->throw_exception("DBI Exception: $_[0]");
}
else {
# the handler may be invoked by something totally out of
# the scope of DBIC
- croak ("DBI Exception: $_[0]");
+ DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
}
- };
- $dbh->{ShowErrorStatement} = 1;
- $dbh->{RaiseError} = 1;
- $dbh->{PrintError} = 0;
+ }, '__DBIC__DBH__ERROR__HANDLER__';
+ }->($self, $dbh);
}
+ }
+ catch {
+ $self->throw_exception("DBI Connection failed: $_")
};
- $DBI::connect_via = $old_connect_via if $old_connect_via;
-
- $self->throw_exception("DBI Connection failed: " . ($@||$DBI::errstr))
- if !$dbh || $@;
-
$self->_dbh_autocommit($dbh->{AutoCommit});
-
$dbh;
}
-sub svp_begin {
- my ($self, $name) = @_;
-
- $name = $self->_svp_generate_name
- unless defined $name;
-
- $self->throw_exception ("You can't use savepoints outside a transaction")
- if $self->{transaction_depth} == 0;
-
- $self->throw_exception ("Your Storage implementation doesn't support savepoints")
- unless $self->can('_svp_begin');
-
- push @{ $self->{savepoints} }, $name;
+sub txn_begin {
+ my $self = shift;
- $self->debugobj->svp_begin($name) if $self->debug;
+ # this means we have not yet connected and do not know the AC status
+ # (e.g. coderef $dbh), need a full-fledged connection check
+ if (! defined $self->_dbh_autocommit) {
+ $self->ensure_connected;
+ }
+ # Otherwise simply connect or re-connect on pid changes
+ else {
+ $self->_get_dbh;
+ }
- return $self->_svp_begin($name);
+ $self->next::method(@_);
}
-sub svp_release {
- my ($self, $name) = @_;
-
- $self->throw_exception ("You can't use savepoints outside a transaction")
- if $self->{transaction_depth} == 0;
+sub _exec_txn_begin {
+ my $self = shift;
- $self->throw_exception ("Your Storage implementation doesn't support savepoints")
- unless $self->can('_svp_release');
+ # if the user is utilizing txn_do - good for him, otherwise we need to
+ # ensure that the $dbh is healthy on BEGIN.
+ # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
+ # will be replaced by a failure of begin_work itself (which will be
+ # then retried on reconnect)
+ if ($self->{_in_do_block}) {
+ $self->_dbh->begin_work;
+ } else {
+ $self->dbh_do(sub { $_[1]->begin_work });
+ }
+}
- if (defined $name) {
- $self->throw_exception ("Savepoint '$name' does not exist")
- unless grep { $_ eq $name } @{ $self->{savepoints} };
+sub txn_commit {
+ my $self = shift;
- # Dig through the stack until we find the one we are releasing. This keeps
- # the stack up to date.
- my $svp;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+ $self->throw_exception("Unable to txn_commit() on a disconnected storage")
+ unless $self->_dbh;
- do { $svp = pop @{ $self->{savepoints} } } while $svp ne $name;
- } else {
- $name = pop @{ $self->{savepoints} };
+ # esoteric case for folks using external $dbh handles
+ if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
+ carp "Storage transaction_depth 0 does not match "
+ ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
+ $self->transaction_depth(1);
}
- $self->debugobj->svp_release($name) if $self->debug;
+ $self->next::method(@_);
- return $self->_svp_release($name);
+ # if AutoCommit is disabled txn_depth never goes to 0
+ # as a new txn is started immediately on commit
+ $self->transaction_depth(1) if (
+ !$self->transaction_depth
+ and
+ defined $self->_dbh_autocommit
+ and
+ ! $self->_dbh_autocommit
+ );
}
-sub svp_rollback {
- my ($self, $name) = @_;
-
- $self->throw_exception ("You can't use savepoints outside a transaction")
- if $self->{transaction_depth} == 0;
+sub _exec_txn_commit {
+ shift->_dbh->commit;
+}
- $self->throw_exception ("Your Storage implementation doesn't support savepoints")
- unless $self->can('_svp_rollback');
+sub txn_rollback {
+ my $self = shift;
- if (defined $name) {
- # If they passed us a name, verify that it exists in the stack
- unless(grep({ $_ eq $name } @{ $self->{savepoints} })) {
- $self->throw_exception("Savepoint '$name' does not exist!");
- }
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+ $self->throw_exception("Unable to txn_rollback() on a disconnected storage")
+ unless $self->_dbh;
- # Dig through the stack until we find the one we are releasing. This keeps
- # the stack up to date.
- while(my $s = pop(@{ $self->{savepoints} })) {
- last if($s eq $name);
- }
- # Add the savepoint back to the stack, as a rollback doesn't remove the
- # named savepoint, only everything after it.
- push(@{ $self->{savepoints} }, $name);
- } else {
- # We'll assume they want to rollback to the last savepoint
- $name = $self->{savepoints}->[-1];
+ # esoteric case for folks using external $dbh handles
+ if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
+ carp "Storage transaction_depth 0 does not match "
+ ."false AutoCommit of $self->{_dbh}, attempting ROLLBACK anyway";
+ $self->transaction_depth(1);
}
- $self->debugobj->svp_rollback($name) if $self->debug;
+ $self->next::method(@_);
- return $self->_svp_rollback($name);
+ # if AutoCommit is disabled txn_depth never goes to 0
+  # as a new txn is started immediately on rollback
+ $self->transaction_depth(1) if (
+ !$self->transaction_depth
+ and
+ defined $self->_dbh_autocommit
+ and
+ ! $self->_dbh_autocommit
+ );
}
-sub _svp_generate_name {
- my ($self) = @_;
-
- return 'savepoint_'.scalar(@{ $self->{'savepoints'} });
+sub _exec_txn_rollback {
+ shift->_dbh->rollback;
}
-sub txn_begin {
- my $self = shift;
-
- # this means we have not yet connected and do not know the AC status
- # (e.g. coderef $dbh)
- $self->ensure_connected if (! defined $self->_dbh_autocommit);
+# generate some identical methods
+for my $meth (qw/svp_begin svp_release svp_rollback/) {
+ no strict qw/refs/;
+ *{__PACKAGE__ ."::$meth"} = subname $meth => sub {
+ my $self = shift;
+ $self->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+ $self->throw_exception("Unable to $meth() on a disconnected storage")
+ unless $self->_dbh;
+ $self->next::method(@_);
+ };
+}
- if($self->{transaction_depth} == 0) {
- $self->debugobj->txn_begin()
- if $self->debug;
- $self->_dbh_begin_work;
- }
- elsif ($self->auto_savepoint) {
- $self->svp_begin;
- }
- $self->{transaction_depth}++;
+# This used to be the top-half of _execute. It was split out to make it
+# easier to override in NoBindVars without duping the rest. It takes up
+# all of _execute's args, and emits $sql, @bind.
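+#
+# e.g. (a sketch) ->_prep_for_execute($op, $result_source, \@args) returns
+# ( $sql_string, [ [ \%bind_attrs => $bind_value ], ... ] )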
+sub _prep_for_execute {
+ #my ($self, $op, $ident, $args) = @_;
+ return shift->_gen_sql_bind(@_)
}
-sub _dbh_begin_work {
- my $self = shift;
+sub _gen_sql_bind {
+ my ($self, $op, $ident, $args) = @_;
- # if the user is utilizing txn_do - good for him, otherwise we need to
- # ensure that the $dbh is healthy on BEGIN.
- # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping"
- # will be replaced by a failure of begin_work itself (which will be
- # then retried on reconnect)
- if ($self->{_in_dbh_do}) {
- $self->_dbh->begin_work;
- } else {
- $self->dbh_do(sub { $_[1]->begin_work });
- }
-}
+ my ($sql, @bind) = $self->sql_maker->$op(
+ blessed($ident) ? $ident->from : $ident,
+ @$args,
+ );
-sub txn_commit {
- my $self = shift;
- if ($self->{transaction_depth} == 1) {
- $self->debugobj->txn_commit()
- if ($self->debug);
- $self->_dbh_commit;
- $self->{transaction_depth} = 0
- if $self->_dbh_autocommit;
- }
- elsif($self->{transaction_depth} > 1) {
- $self->{transaction_depth}--;
- $self->svp_release
- if $self->auto_savepoint;
+ if (
+ ! $ENV{DBIC_DT_SEARCH_OK}
+ and
+ $op eq 'select'
+ and
+ first { blessed($_->[1]) && $_->[1]->isa('DateTime') } @bind
+ ) {
+ carp_unique 'DateTime objects passed to search() are not supported '
+ . 'properly (InflateColumn::DateTime formats and settings are not '
+ . 'respected.) See "Formatting DateTime objects in queries" in '
+ . 'DBIx::Class::Manual::Cookbook. To disable this warning for good '
+ . 'set $ENV{DBIC_DT_SEARCH_OK} to true'
}
-}
-sub _dbh_commit {
- my $self = shift;
- my $dbh = $self->_dbh
- or $self->throw_exception('cannot COMMIT on a disconnected handle');
- $dbh->commit;
+ return( $sql, $self->_resolve_bindattrs(
+ $ident, [ @{$args->[2]{bind}||[]}, @bind ]
+ ));
}
-sub txn_rollback {
- my $self = shift;
- my $dbh = $self->_dbh;
- eval {
- if ($self->{transaction_depth} == 1) {
- $self->debugobj->txn_rollback()
- if ($self->debug);
- $self->{transaction_depth} = 0
- if $self->_dbh_autocommit;
- $self->_dbh_rollback;
- }
- elsif($self->{transaction_depth} > 1) {
- $self->{transaction_depth}--;
- if ($self->auto_savepoint) {
- $self->svp_rollback;
- $self->svp_release;
- }
- }
- else {
- die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new;
- }
- };
- if ($@) {
- my $error = $@;
- my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION";
- $error =~ /$exception_class/ and $self->throw_exception($error);
- # ensure that a failed rollback resets the transaction depth
- $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1;
- $self->throw_exception($error);
- }
-}
+sub _resolve_bindattrs {
+ my ($self, $ident, $bind, $colinfos) = @_;
-sub _dbh_rollback {
- my $self = shift;
- my $dbh = $self->_dbh
- or $self->throw_exception('cannot ROLLBACK on a disconnected handle');
- $dbh->rollback;
-}
+ $colinfos ||= {};
-# This used to be the top-half of _execute. It was split out to make it
-# easier to override in NoBindVars without duping the rest. It takes up
-# all of _execute's args, and emits $sql, @bind.
-sub _prep_for_execute {
- my ($self, $op, $extra_bind, $ident, $args) = @_;
+ my $resolve_bindinfo = sub {
+ #my $infohash = shift;
- if( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) {
- $ident = $ident->from();
- }
+ %$colinfos = %{ $self->_resolve_column_info($ident) }
+ unless keys %$colinfos;
+
+ my $ret;
+ if (my $col = $_[0]->{dbic_colname}) {
+ $ret = { %{$_[0]} };
+
+ $ret->{sqlt_datatype} ||= $colinfos->{$col}{data_type}
+ if $colinfos->{$col}{data_type};
+
+ $ret->{sqlt_size} ||= $colinfos->{$col}{size}
+ if $colinfos->{$col}{size};
+ }
- my ($sql, @bind) = $self->sql_maker->$op($ident, @$args);
+ $ret || $_[0];
+ };
- unshift(@bind,
- map { ref $_ eq 'ARRAY' ? $_ : [ '!!dummy', $_ ] } @$extra_bind)
- if $extra_bind;
- return ($sql, \@bind);
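+  # normalize every accepted bind shape to [ \%attrs, $value ], e.g.
+  #   42                  => [ {}, 42 ]
+  #   [ undef, 42 ]       => [ {}, 42 ]
+  #   [ \'varchar', 42 ]  => [ { sqlt_datatype => 'varchar' }, 42 ]
+  #   [ colname => 42 ]   => [ { resolved column info }, 42 ]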
+ return [ map {
+ if (ref $_ ne 'ARRAY') {
+ [{}, $_]
+ }
+ elsif (! defined $_->[0]) {
+ [{}, $_->[1]]
+ }
+ elsif (ref $_->[0] eq 'HASH') {
+ [
+ ($_->[0]{dbd_attrs} or $_->[0]{sqlt_datatype}) ? $_->[0] : $resolve_bindinfo->($_->[0]),
+ $_->[1]
+ ]
+ }
+ elsif (ref $_->[0] eq 'SCALAR') {
+ [ { sqlt_datatype => ${$_->[0]} }, $_->[1] ]
+ }
+ else {
+ [ $resolve_bindinfo->({ dbic_colname => $_->[0] }), $_->[1] ]
+ }
+ } @$bind ];
}
+sub _format_for_trace {
+ #my ($self, $bind) = @_;
-sub _fix_bind_params {
- my ($self, @bind) = @_;
+ ### Turn @bind from something like this:
+ ### ( [ "artist", 1 ], [ \%attrs, 3 ] )
+ ### to this:
+ ### ( "'1'", "'3'" )
- ### Turn @bind from something like this:
- ### ( [ "artist", 1 ], [ "cdid", 1, 3 ] )
- ### to this:
- ### ( "'1'", "'1'", "'3'" )
- return
- map {
- if ( defined( $_ && $_->[1] ) ) {
- map { qq{'$_'}; } @{$_}[ 1 .. $#$_ ];
- }
- else { q{'NULL'}; }
- } @bind;
+ map {
+ defined( $_ && $_->[1] )
+ ? qq{'$_->[1]'}
+ : q{NULL}
+ } @{$_[1] || []};
}
sub _query_start {
- my ( $self, $sql, @bind ) = @_;
-
- if ( $self->debug ) {
- @bind = $self->_fix_bind_params(@bind);
+ my ( $self, $sql, $bind ) = @_;
- $self->debugobj->query_start( $sql, @bind );
- }
+ $self->debugobj->query_start( $sql, $self->_format_for_trace($bind) )
+ if $self->debug;
}
sub _query_end {
- my ( $self, $sql, @bind ) = @_;
+ my ( $self, $sql, $bind ) = @_;
- if ( $self->debug ) {
- @bind = $self->_fix_bind_params(@bind);
- $self->debugobj->query_end( $sql, @bind );
- }
+ $self->debugobj->query_end( $sql, $self->_format_for_trace($bind) )
+ if $self->debug;
}
-sub _dbh_execute {
- my ($self, $dbh, $op, $extra_bind, $ident, $bind_attributes, @args) = @_;
-
- my ($sql, $bind) = $self->_prep_for_execute($op, $extra_bind, $ident, \@args);
+my $sba_compat;
+sub _dbi_attrs_for_bind {
+ my ($self, $ident, $bind) = @_;
- $self->_query_start( $sql, @$bind );
+ if (! defined $sba_compat) {
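+    # detect a legacy source_bind_attributes() override by comparing
+    # coderef addresses: if the resolved method is not the one defined
+    # right here, a subclass (or user code) has overridden it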
+ $self->_determine_driver;
+ $sba_compat = $self->can('source_bind_attributes') == \&source_bind_attributes
+ ? 0
+ : 1
+ ;
+ }
- my $sth = $self->sth($sql,$op);
+ my $sba_attrs;
+ if ($sba_compat) {
+ my $class = ref $self;
+ carp_unique (
+ "The source_bind_attributes() override in $class relies on a deprecated codepath. "
+ .'You are strongly advised to switch your code to override bind_attribute_by_datatype() '
+ .'instead. This legacy compat shim will also disappear some time before DBIC 0.09'
+ );
- my $placeholder_index = 1;
+    $sba_attrs = $self->source_bind_attributes($ident);
+ }
- foreach my $bound (@$bind) {
- my $attributes = {};
- my($column_name, @data) = @$bound;
+ my @attrs;
- if ($bind_attributes) {
- $attributes = $bind_attributes->{$column_name}
- if defined $bind_attributes->{$column_name};
+ for (map { $_->[0] } @$bind) {
+ push @attrs, do {
+ if (exists $_->{dbd_attrs}) {
+ $_->{dbd_attrs}
+ }
+ elsif($_->{sqlt_datatype}) {
+ # cache the result in the dbh_details hash, as it can not change unless
+ # we connect to something else
+ my $cache = $self->_dbh_details->{_datatype_map_cache} ||= {};
+ if (not exists $cache->{$_->{sqlt_datatype}}) {
+ $cache->{$_->{sqlt_datatype}} = $self->bind_attribute_by_data_type($_->{sqlt_datatype}) || undef;
+ }
+ $cache->{$_->{sqlt_datatype}};
+ }
+ elsif ($sba_attrs and $_->{dbic_colname}) {
+ $sba_attrs->{$_->{dbic_colname}} || undef;
+ }
+ else {
+ undef; # always push something at this position
+ }
}
+ }
+
+ return \@attrs;
+}
- foreach my $data (@data) {
- my $ref = ref $data;
- $data = $ref && $ref ne 'ARRAY' ? ''.$data : $data; # stringify args (except arrayrefs)
+sub _execute {
+ my ($self, $op, $ident, @args) = @_;
+
+ my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args);
+
+ shift->dbh_do( # retry over disconnects
+ '_dbh_execute',
+ $sql,
+ $bind,
+ $self->_dbi_attrs_for_bind($ident, $bind)
+ );
+}
- $sth->bind_param($placeholder_index, $data, $attributes);
- $placeholder_index++;
+sub _dbh_execute {
+ my ($self, undef, $sql, $bind, $bind_attrs) = @_;
+
+ $self->_query_start( $sql, $bind );
+ my $sth = $self->_sth($sql);
+
+ for my $i (0 .. $#$bind) {
+ if (ref $bind->[$i][1] eq 'SCALAR') { # any scalarrefs are assumed to be bind_inouts
+ $sth->bind_param_inout(
+ $i + 1, # bind params counts are 1-based
+ $bind->[$i][1],
+ $bind->[$i][0]{dbd_size} || $self->_max_column_bytesize($bind->[$i][0]), # size
+ $bind_attrs->[$i],
+ );
+ }
+ else {
+ $sth->bind_param(
+ $i + 1,
+ (ref $bind->[$i][1] and overload::Method($bind->[$i][1], '""'))
+ ? "$bind->[$i][1]"
+ : $bind->[$i][1]
+ ,
+ $bind_attrs->[$i],
+ );
}
}
# Can this fail without throwing an exception anyways???
my $rv = $sth->execute();
- $self->throw_exception($sth->errstr) if !$rv;
+ $self->throw_exception(
+ $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
+ ) if !$rv;
- $self->_query_end( $sql, @$bind );
+ $self->_query_end( $sql, $bind );
return (wantarray ? ($rv, $sth, @$bind) : $rv);
}
-sub _execute {
- my $self = shift;
- $self->dbh_do('_dbh_execute', @_); # retry over disconnects
+sub _prefetch_autovalues {
+ my ($self, $source, $to_insert) = @_;
+
+ my $colinfo = $source->columns_info;
+
+ my %values;
+ for my $col (keys %$colinfo) {
+ if (
+ $colinfo->{$col}{auto_nextval}
+ and
+ (
+ ! exists $to_insert->{$col}
+ or
+ ref $to_insert->{$col} eq 'SCALAR'
+ or
+ (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY')
+ )
+ ) {
+ $values{$col} = $self->_sequence_fetch(
+ 'NEXTVAL',
+ ( $colinfo->{$col}{sequence} ||=
+ $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
+ ),
+ );
+ }
+ }
+
+ \%values;
}
sub insert {
my ($self, $source, $to_insert) = @_;
- my $ident = $source->from;
- my $bind_attributes = $self->source_bind_attributes($source);
+ my $prefetched_values = $self->_prefetch_autovalues($source, $to_insert);
+
+ # fuse the values, but keep a separate list of prefetched_values so that
+ # they can be fused once again with the final return
+ $to_insert = { %$to_insert, %$prefetched_values };
+
+  # FIXME - we seem to treat undef values as non-supplied. This is wrong.
+  # Investigate what it takes to s/defined/exists/
+ my $col_infos = $source->columns_info;
+ my %pcols = map { $_ => 1 } $source->primary_columns;
+ my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
+ for my $col ($source->columns) {
+ if ($col_infos->{$col}{is_auto_increment}) {
+ $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+ $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
+ }
- my $updated_cols = {};
+ # nothing to retrieve when explicit values are supplied
+ next if (defined $to_insert->{$col} and ! (
+ ref $to_insert->{$col} eq 'SCALAR'
+ or
+ (ref $to_insert->{$col} eq 'REF' and ref ${$to_insert->{$col}} eq 'ARRAY')
+ ));
+
+    # the 'scalar keys' trick preserves the ->columns declaration order
+ $retrieve_cols{$col} = scalar keys %retrieve_cols if (
+ $pcols{$col}
+ or
+ $col_infos->{$col}{retrieve_on_insert}
+ );
+ };
- foreach my $col ( $source->columns ) {
- if ( !defined $to_insert->{$col} ) {
- my $col_info = $source->column_info($col);
+ local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
+ local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
- if ( $col_info->{auto_nextval} ) {
- $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch(
- 'nextval',
- $col_info->{sequence} ||
- $self->_dbh_get_autoinc_seq($self->_get_dbh, $source)
- );
- }
- }
+ my ($sqla_opts, @ir_container);
+ if (%retrieve_cols and $self->_use_insert_returning) {
+ $sqla_opts->{returning_container} = \@ir_container
+ if $self->_use_insert_returning_bound;
+
+ $sqla_opts->{returning} = [
+ sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols
+ ];
+ }
+
+ my ($rv, $sth) = $self->_execute('insert', $source, $to_insert, $sqla_opts);
+
+ my %returned_cols = %$to_insert;
+ if (my $retlist = $sqla_opts->{returning}) { # if IR is supported - we will get everything in one set
+ @ir_container = try {
+ local $SIG{__WARN__} = sub {};
+ my @r = $sth->fetchrow_array;
+ $sth->finish;
+ @r;
+ } unless @ir_container;
+
+ @returned_cols{@$retlist} = @ir_container if @ir_container;
}
+ else {
+ # pull in PK if needed and then everything else
+ if (my @missing_pri = grep { $pcols{$_} } keys %retrieve_cols) {
+
+ $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" )
+ unless $self->can('last_insert_id');
+
+ my @pri_values = $self->last_insert_id($source, @missing_pri);
+
+ $self->throw_exception( "Can't get last insert id" )
+ unless (@pri_values == @missing_pri);
+
+ @returned_cols{@missing_pri} = @pri_values;
+ delete $retrieve_cols{$_} for @missing_pri;
+ }
+
+ # if there is more left to pull
+ if (%retrieve_cols) {
+ $self->throw_exception(
+ 'Unable to retrieve additional columns without a Primary Key on ' . $source->source_name
+ ) unless %pcols;
+
+ my @left_to_fetch = sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols;
- $self->_execute('insert' => [], $source, $bind_attributes, $to_insert);
+ my $cur = DBIx::Class::ResultSet->new($source, {
+ where => { map { $_ => $returned_cols{$_} } (keys %pcols) },
+ select => \@left_to_fetch,
+ })->cursor;
- return $updated_cols;
+ @returned_cols{@left_to_fetch} = $cur->next;
+
+ $self->throw_exception('Duplicate row returned for PK-search after fresh insert')
+ if scalar $cur->next;
+ }
+ }
+
+ return { %$prefetched_values, %returned_cols };
}
-## Currently it is assumed that all values passed will be "normal", i.e. not
-## scalar refs, or at least, all the same type as the first set, the statement is
-## only prepped once.
sub insert_bulk {
my ($self, $source, $cols, $data) = @_;
- my %colvalues;
- @colvalues{@$cols} = (0..$#$cols);
+ my @col_range = (0..$#$cols);
+
+ # FIXME - perhaps this is not even needed? does DBI stringify?
+ #
+ # forcibly stringify whatever is stringifiable
+ # ResultSet::populate() hands us a copy - safe to mangle
+ for my $r (0 .. $#$data) {
+ for my $c (0 .. $#{$data->[$r]}) {
+ $data->[$r][$c] = "$data->[$r][$c]"
+ if ( ref $data->[$r][$c] and overload::Method($data->[$r][$c], '""') );
+ }
+ }
+
+ my $colinfos = $source->columns_info($cols);
+
+ local $self->{_autoinc_supplied_for_op} =
+ (first { $_->{is_auto_increment} } values %$colinfos)
+ ? 1
+ : 0
+ ;
+
+ # get a slice type index based on first row of data
+ # a "column" in this context may refer to more than one bind value
+ # e.g. \[ '?, ?', [...], [...] ]
+ #
+ # construct the value type index - a description of values types for every
+ # per-column slice of $data:
+ #
+ # nonexistent - nonbind literal
+ # 0 - regular value
+ # [] of bindattrs - resolved attribute(s) of bind(s) passed via literal+bind \[] combo
+ #
+ # also construct the column hash to pass to the SQL generator. For plain
+ # (non literal) values - convert the members of the first row into a
+ # literal+bind combo, with extra positional info in the bind attr hashref.
+ # This will allow us to match the order properly, and is so contrived
+ # because a user-supplied literal/bind (or something else specific to a
+ # resultsource and/or storage driver) can inject extra binds along the
+ # way, so one can't rely on "shift positions" ordering at all. Also we
+ # can't just hand SQLA a set of some known "values" (e.g. hashrefs that
+ # can be later matched up by address), because we want to supply a real
+ # value on which perhaps e.g. datatype checks will be performed
+ my ($proto_data, $value_type_idx);
+ for my $i (@col_range) {
+ my $colname = $cols->[$i];
+ if (ref $data->[0][$i] eq 'SCALAR') {
+ # no bind value at all - no type
+
+ $proto_data->{$colname} = $data->[0][$i];
+ }
+ elsif (ref $data->[0][$i] eq 'REF' and ref ${$data->[0][$i]} eq 'ARRAY' ) {
+ # repack, so we don't end up mangling the original \[]
+ my ($sql, @bind) = @${$data->[0][$i]};
+
+ # normalization of user supplied stuff
+ my $resolved_bind = $self->_resolve_bindattrs(
+ $source, \@bind, $colinfos,
+ );
+
+ # store value-less (attrs only) bind info - we will be comparing all
+ # supplied binds against this for sanity
+ $value_type_idx->{$i} = [ map { $_->[0] } @$resolved_bind ];
+
+ $proto_data->{$colname} = \[ $sql, map { [
+ # inject slice order to use for $proto_bind construction
+ { %{$resolved_bind->[$_][0]}, _bind_data_slice_idx => $i }
+ =>
+ $resolved_bind->[$_][1]
+ ] } (0 .. $#bind)
+ ];
+ }
+ else {
+ $value_type_idx->{$i} = 0;
+
+ $proto_data->{$colname} = \[ '?', [
+ { dbic_colname => $colname, _bind_data_slice_idx => $i }
+ =>
+ $data->[0][$i]
+ ] ];
+ }
+ }
- for my $i (0..$#$cols) {
- my $first_val = $data->[0][$i];
- next unless ref $first_val eq 'SCALAR';
+ my ($sql, $proto_bind) = $self->_prep_for_execute (
+ 'insert',
+ $source,
+ [ $proto_data ],
+ );
- $colvalues{ $cols->[$i] } = $first_val;
+ if (! @$proto_bind and keys %$value_type_idx) {
+ # if the bindlist is empty and we had some dynamic binds, this means the
+ # storage ate them away (e.g. the NoBindVars component) and interpolated
+ # them directly into the SQL. This obviously can't be good for multi-inserts
+ $self->throw_exception('Cannot insert_bulk without support for placeholders');
}
- # check for bad data and stringify stringifiable objects
- my $bad_slice = sub {
- my ($msg, $col_idx, $slice_idx) = @_;
+ # sanity checks
+ # FIXME - devise a flag "no babysitting" or somesuch to shut this off
+ #
+ # use an error reporting closure for convenience (less to pass)
+ my $bad_slice_report_cref = sub {
+ my ($msg, $r_idx, $c_idx) = @_;
$self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
$msg,
- $cols->[$col_idx],
+ $cols->[$c_idx],
do {
- local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any
- Data::Dumper::Concise::Dumper({
- map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols)
+ require Data::Dumper::Concise;
+ local $Data::Dumper::Maxdepth = 5;
+ Data::Dumper::Concise::Dumper ({
+ map { $cols->[$_] =>
+ $data->[$r_idx][$_]
+ } @col_range
}),
}
);
};
- for my $datum_idx (0..$#$data) {
- my $datum = $data->[$datum_idx];
+ for my $col_idx (@col_range) {
+ my $reference_val = $data->[0][$col_idx];
- for my $col_idx (0..$#$cols) {
- my $val = $datum->[$col_idx];
- my $sqla_bind = $colvalues{ $cols->[$col_idx] };
- my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR';
+ for my $row_idx (1..$#$data) { # we are comparing against what we got from [0] above, hence start from 1
+ my $val = $data->[$row_idx][$col_idx];
- if ($is_literal_sql) {
- if (not ref $val) {
- $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx);
+ if (! exists $value_type_idx->{$col_idx}) { # literal no binds
+ if (ref $val ne 'SCALAR') {
+ $bad_slice_report_cref->(
+ "Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
+ $row_idx,
+ $col_idx,
+ );
}
- elsif ((my $reftype = ref $val) ne 'SCALAR') {
- $bad_slice->("$reftype reference found where literal SQL expected",
- $col_idx, $datum_idx);
+ elsif ($$val ne $$reference_val) {
+ $bad_slice_report_cref->(
+ "Inconsistent literal SQL value (expecting \\'$$reference_val')",
+ $row_idx,
+ $col_idx,
+ );
}
- elsif ($$val ne $$sqla_bind){
- $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'",
- $col_idx, $datum_idx);
+ }
+ elsif (! $value_type_idx->{$col_idx} ) { # regular non-literal value
+ if (ref $val eq 'SCALAR' or (ref $val eq 'REF' and ref $$val eq 'ARRAY') ) {
+ $bad_slice_report_cref->("Literal SQL found where a plain bind value is expected", $row_idx, $col_idx);
}
}
- elsif (my $reftype = ref $val) {
- require overload;
- if (overload::Method($val, '""')) {
- $datum->[$col_idx] = "".$val;
+ else { # binds from a \[], compare type and attrs
+ if (ref $val ne 'REF' or ref $$val ne 'ARRAY') {
+ $bad_slice_report_cref->(
+ "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])",
+ $row_idx,
+ $col_idx,
+ );
}
- else {
- $bad_slice->("$reftype reference found where bind expected",
- $col_idx, $datum_idx);
+ # start drilling down and bail out early on identical refs
+ elsif (
+ $reference_val != $val
+ or
+ $$reference_val != $$val
+ ) {
+ if (${$val}->[0] ne ${$reference_val}->[0]) {
+ $bad_slice_report_cref->(
+ "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... ])",
+ $row_idx,
+ $col_idx,
+ );
+ }
+ # need to check the bind attrs - a bind will happen only once for
+ # the entire dataset, so any changes further down will be ignored.
+ elsif (! Data::Compare::Compare(
+ $value_type_idx->{$col_idx},
+ [
+ map
+ { $_->[0] }
+ @{$self->_resolve_bindattrs(
+ $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
+ )}
+ ],
+ )) {
+ $bad_slice_report_cref->(
+ 'Differing bind attributes on literal/bind values not supported',
+ $row_idx,
+ $col_idx,
+ );
+ }
}
}
}
}
- my ($sql, $bind) = $self->_prep_for_execute (
- 'insert', undef, $source, [\%colvalues]
- );
- my @bind = @$bind;
-
- my $empty_bind = 1 if (not @bind) &&
- (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols;
-
- if ((not @bind) && (not $empty_bind)) {
- $self->throw_exception(
- 'Cannot insert_bulk without support for placeholders'
- );
- }
-
- # neither _execute_array, nor _execute_inserts_with_no_binds are
- # atomic (even if _execute _array is a single call). Thus a safety
+ # neither _dbh_execute_for_fetch, nor _dbh_execute_inserts_with_no_binds
+ # are atomic (even if execute_for_fetch is a single call). Thus a safety
# scope guard
- my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0;
+ my $guard = $self->txn_scope_guard;
- $self->_query_start( $sql, ['__BULK__'] );
- my $sth = $self->sth($sql);
+ $self->_query_start( $sql, @$proto_bind ? [[undef => '__BULK_INSERT__' ]] : () );
+ my $sth = $self->_sth($sql);
my $rv = do {
- if ($empty_bind) {
- # bind_param_array doesn't work if there are no binds
- $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
+ if (@$proto_bind) {
+ # proto bind contains the information on which pieces of $data to pull
+ # $cols is passed in only for prettier error-reporting
+ $self->_dbh_execute_for_fetch( $source, $sth, $proto_bind, $cols, $data );
}
else {
-# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args
- $self->_execute_array( $source, $sth, \@bind, $cols, $data );
+ # bind_param_array doesn't work if there are no binds
+ $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
}
};
- $self->_query_end( $sql, ['__BULK__'] );
+ $self->_query_end( $sql, @$proto_bind ? [[ undef => '__BULK_INSERT__' ]] : () );
+ $guard->commit;
- $guard->commit if $guard;
-
- return (wantarray ? ($rv, $sth, @bind) : $rv);
+ return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
}
-sub _execute_array {
- my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_;
+# execute_for_fetch is capable of returning data just fine (it means it
+# can be used for INSERT...RETURNING and UPDATE...RETURNING). Since this
+# is the void-populate fast-path we will just ignore this altogether
+# for the time being.
+sub _dbh_execute_for_fetch {
+ my ($self, $source, $sth, $proto_bind, $cols, $data) = @_;
- ## This must be an arrayref, else nothing works!
- my $tuple_status = [];
+ my @idx_range = ( 0 .. $#$proto_bind );
- ## Get the bind_attributes, if any exist
- my $bind_attributes = $self->source_bind_attributes($source);
+ # If we have any bind attributes to take care of, we will bind the
+ # proto-bind data (which will never be used by execute_for_fetch).
+ # However since column bindtypes are "sticky", this is sufficient
+ # to get the DBD to apply the bindtype to all values later on
- ## Bind the values and execute
- my $placeholder_index = 1;
+ my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind);
- foreach my $bound (@$bind) {
+ for my $i (@idx_range) {
+ $sth->bind_param (
+ $i+1, # DBI bind indexes are 1-based
+ $proto_bind->[$i][1],
+ $bind_attrs->[$i],
+ ) if defined $bind_attrs->[$i];
+ }
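+  # A minimal sketch of the "sticky" behavior relied upon here - per the
+  # DBI docs a TYPE set via bind_param() persists on the statement handle,
+  # so later executes reuse it without re-binding:
+  #
+  #   use DBI qw(:sql_types);
+  #   $sth->bind_param(1, undef, { TYPE => SQL_INTEGER });
+  #   $sth->execute(42);  # 42 is still bound as SQL_INTEGER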
- my $attributes = {};
- my ($column_name, $data_index) = @$bound;
+ # At this point $data slots named in the _bind_data_slice_idx of
+ # each piece of $proto_bind are either \[]s or plain values to be
+ # passed in. Construct the dispensing coderef. *NOTE* the order
+ # of $data will differ from this of the ?s in the SQL (due to
+ # alphabetical ordering by colname). We actually do want to
+ # preserve this behavior so that prepare_cached has a better
+ # chance of matching on unrelated calls
+ my %data_reorder = map { $proto_bind->[$_][0]{_bind_data_slice_idx} => $_ } @idx_range;
+
+ my $fetch_row_idx = -1; # saner loop this way
+ my $fetch_tuple = sub {
+ return undef if ++$fetch_row_idx > $#$data;
+
+ return [ map
+ { (ref $_ eq 'REF' and ref $$_ eq 'ARRAY')
+ ? map { $_->[-1] } @{$$_}[1 .. $#$$_]
+ : $_
+ }
+ map
+ { $data->[$fetch_row_idx][$_]}
+ sort
+ { $data_reorder{$a} <=> $data_reorder{$b} }
+ keys %data_reorder
+ ];
+ };
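+  # A worked (hypothetical) trace of the reordering: if the three pieces of
+  # $proto_bind carry _bind_data_slice_idx values of 2, 0 and 1, then
+  # %data_reorder is ( 2 => 0, 0 => 1, 1 => 2 ) and every tuple comes out
+  # as [ @{$row}[2, 0, 1] ] - placeholder order, not storage order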
- if( $bind_attributes ) {
- $attributes = $bind_attributes->{$column_name}
- if defined $bind_attributes->{$column_name};
- }
+ my $tuple_status = [];
+ my ($rv, $err);
+ try {
+ $rv = $sth->execute_for_fetch(
+ $fetch_tuple,
+ $tuple_status,
+ );
+ }
+ catch {
+ $err = shift;
+ };
- my @data = map { $_->[$data_index] } @$data;
+ # Not all DBDs are created equal. Some throw on error, some return
+ # an undef $rv, and some set $sth->err - try whatever we can
+ $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if (
+ ! defined $err
+ and
+ ( !defined $rv or $sth->err )
+ );
- $sth->bind_param_array( $placeholder_index, [@data], $attributes );
- $placeholder_index++;
+ # Statement must finish even if there was an exception.
+ try {
+ $sth->finish
}
-
- my $rv = eval {
- $self->_dbh_execute_array($sth, $tuple_status, @extra);
+ catch {
+ $err = shift unless defined $err
};
- my $err = $@ || $sth->errstr;
-
-# Statement must finish even if there was an exception.
- eval { $sth->finish };
- $err = $@ unless $err;
- if ($err) {
+ if (defined $err) {
my $i = 0;
++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i];
$self->throw_exception("Unexpected populate error: $err")
if ($i > $#$tuple_status);
- $self->throw_exception(sprintf "%s for populate slice:\n%s",
+ require Data::Dumper::Concise;
+ $self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s",
($tuple_status->[$i][1] || $err),
- Data::Dumper::Concise::Dumper({
- map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
- }),
+ Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } ),
);
}
- return $rv;
-}
-
-sub _dbh_execute_array {
- my ($self, $sth, $tuple_status, @extra) = @_;
- return $sth->execute_array({ArrayTupleStatus => $tuple_status});
+ return $rv;
}
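+# For reference, a sketch of the DBI execute_for_fetch() contract the code
+# above relies on (see the DBI docs for the authoritative description):
+#
+#   my $rv = $sth->execute_for_fetch(
+#     sub { ... },      # next [ @bind_values ] per call, undef when done
+#     my $status = [],  # per-tuple rv's, or [ $err, $errstr, $state ] on failure
+#   );
+#   # $rv is the number of tuples executed, or undef if any tuple failed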
sub _dbh_execute_inserts_with_no_binds {
my ($self, $sth, $count) = @_;
- eval {
+ my $err;
+ try {
my $dbh = $self->_get_dbh;
local $dbh->{RaiseError} = 1;
local $dbh->{PrintError} = 0;
$sth->execute foreach 1..$count;
+ }
+ catch {
+ $err = shift;
};
- my $exception = $@;
-# Make sure statement is finished even if there was an exception.
- eval { $sth->finish };
- $exception = $@ unless $exception;
+ # Make sure statement is finished even if there was an exception.
+ try {
+ $sth->finish
+ }
+ catch {
+ $err = shift unless defined $err;
+ };
- $self->throw_exception($exception) if $exception;
+ $self->throw_exception($err) if defined $err;
return $count;
}
sub update {
- my ($self, $source, @args) = @_;
-
- my $bind_attrs = $self->source_bind_attributes($source);
-
- return $self->_execute('update' => [], $source, $bind_attrs, @args);
+ #my ($self, $source, @args) = @_;
+ shift->_execute('update', @_);
}
sub delete {
- my ($self, $source, @args) = @_;
-
- my $bind_attrs = $self->source_bind_attributes($source);
-
- return $self->_execute('delete' => [], $source, $bind_attrs, @args);
-}
-
-# We were sent here because the $rs contains a complex search
-# which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets, or non-introspectable conditions)
-#
-# Generating a single PK column subquery is trivial and supported
-# by all RDBMS. However if we have a multicolumn PK, things get ugly.
-# Look at _multipk_update_delete()
-sub _subq_update_delete {
- my $self = shift;
- my ($rs, $op, $values) = @_;
-
- my $rsrc = $rs->result_source;
-
- # quick check if we got a sane rs on our hands
- my @pcols = $rsrc->primary_columns;
- unless (@pcols) {
- $self->throw_exception (
- sprintf (
- "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets",
- $rsrc->source_name || $rsrc->from
- )
- );
- }
-
- my $sel = $rs->_resolved_attrs->{select};
- $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-
- if (
- join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
- ne
- join ("\x00", sort @$sel )
- ) {
- $self->throw_exception (
- '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
- );
- }
-
- if (@pcols == 1) {
- return $self->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- { $pcols[0] => { -in => $rs->as_query } },
- );
- }
-
- else {
- return $self->_multipk_update_delete (@_);
- }
-}
-
-# ANSI SQL does not provide a reliable way to perform a multicol-PK
-# resultset update/delete involving subqueries. So by default resort
-# to simple (and inefficient) delete_all style per-row opearations,
-# while allowing specific storages to override this with a faster
-# implementation.
-#
-sub _multipk_update_delete {
- return shift->_per_row_update_delete (@_);
-}
-
-# This is the default loop used to delete/update rows for multi PK
-# resultsets, and used by mysql exclusively (because it can't do anything
-# else).
-#
-# We do not use $row->$op style queries, because resultset update/delete
-# is not expected to cascade (this is what delete_all/update_all is for).
-#
-# There should be no race conditions as the entire operation is rolled
-# in a transaction.
-#
-sub _per_row_update_delete {
- my $self = shift;
- my ($rs, $op, $values) = @_;
-
- my $rsrc = $rs->result_source;
- my @pcols = $rsrc->primary_columns;
-
- my $guard = $self->txn_scope_guard;
-
- # emulate the return value of $sth->execute for non-selects
- my $row_cnt = '0E0';
-
- my $subrs_cur = $rs->cursor;
- my @all_pk = $subrs_cur->all;
- for my $pks ( @all_pk) {
-
- my $cond;
- for my $i (0.. $#pcols) {
- $cond->{$pcols[$i]} = $pks->[$i];
- }
-
- $self->$op (
- $rsrc,
- $op eq 'update' ? $values : (),
- $cond,
- );
-
- $row_cnt++;
- }
-
- $guard->commit;
-
- return $row_cnt;
+ #my ($self, $source, @args) = @_;
+ shift->_execute('delete', @_);
}
sub _select {
my $self = shift;
-
- # localization is neccessary as
- # 1) there is no infrastructure to pass this around before SQLA2
- # 2) _select_args sets it and _prep_for_execute consumes it
- my $sql_maker = $self->sql_maker;
- local $sql_maker->{_dbic_rs_attrs};
-
- return $self->_execute($self->_select_args(@_));
+ $self->_execute($self->_select_args(@_));
}
sub _select_args_to_query {
my $self = shift;
- # localization is neccessary as
- # 1) there is no infrastructure to pass this around before SQLA2
- # 2) _select_args sets it and _prep_for_execute consumes it
- my $sql_maker = $self->sql_maker;
- local $sql_maker->{_dbic_rs_attrs};
+ $self->throw_exception(
+ "Unable to generate limited query representation with 'software_limit' enabled"
+ ) if ($_[3]->{software_limit} and ($_[3]->{offset} or $_[3]->{rows}) );
- # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset)
+ # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset)
# = $self->_select_args($ident, $select, $cond, $attrs);
- my ($op, $bind, $ident, $bind_attrs, @args) =
+ my ($op, $ident, @args) =
$self->_select_args(@_);
- # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]);
- my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args);
+ # my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]);
+ my ($sql, $prepared_bind) = $self->_gen_sql_bind($op, $ident, \@args);
$prepared_bind ||= [];
return wantarray
- ? ($sql, $prepared_bind, $bind_attrs)
+ ? ($sql, $prepared_bind)
: \[ "($sql)", @$prepared_bind ]
;
}
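+# A caller-side sketch (hypothetical arguments) of the two return shapes:
+#
+#   my $subq = $storage->_select_args_to_query($from, $select, $where, $attrs);
+#   # scalar context: \[ '(SELECT ...)', @bind ] - embeddable as a subquery
+#
+#   my ($sql, $bind) = $storage->_select_args_to_query($from, $select, $where, $attrs);
+#   # list context: the raw SQL string and its prepared bind list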
sub _select_args {
my ($self, $ident, $select, $where, $attrs) = @_;
+ my $sql_maker = $self->sql_maker;
my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident);
- my $sql_maker = $self->sql_maker;
- $sql_maker->{_dbic_rs_attrs} = {
+ $attrs = {
%$attrs,
select => $select,
from => $ident,
where => $where,
$rs_alias && $alias2source->{$rs_alias}
- ? ( _source_handle => $alias2source->{$rs_alias}->handle )
+ ? ( _rsroot_rsrc => $alias2source->{$rs_alias} )
: ()
,
};
- # calculate bind_attrs before possible $ident mangling
- my $bind_attrs = {};
- for my $alias (keys %$alias2source) {
- my $bindtypes = $self->source_bind_attributes ($alias2source->{$alias}) || {};
- for my $col (keys %$bindtypes) {
-
- my $fqcn = join ('.', $alias, $col);
- $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col};
-
- # Unqialified column names are nice, but at the same time can be
- # rather ambiguous. What we do here is basically go along with
- # the loop, adding an unqualified column slot to $bind_attrs,
- # alongside the fully qualified name. As soon as we encounter
- # another column by that name (which would imply another table)
- # we unset the unqualified slot and never add any info to it
- # to avoid erroneous type binding. If this happens the users
- # only choice will be to fully qualify his column name
-
- if (exists $bind_attrs->{$col}) {
- $bind_attrs->{$col} = {};
- }
- else {
- $bind_attrs->{$col} = $bind_attrs->{$fqcn};
- }
- }
+ # Sanity check the attributes (SQLMaker does it too, but
+ # in case of a software_limit we would never reach it)
+ if (defined $attrs->{offset}) {
+ $self->throw_exception('A supplied offset attribute must be a non-negative integer')
+ if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
}
- # adjust limits
- if (
- $attrs->{software_limit}
- ||
- $sql_maker->_default_limit_syntax eq "GenericSubQ"
- ) {
- $attrs->{software_limit} = 1;
+ if (defined $attrs->{rows}) {
+ $self->throw_exception("The rows attribute must be a positive integer if present")
+ if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
}
- else {
- $self->throw_exception("rows attribute must be positive if present")
- if (defined($attrs->{rows}) && !($attrs->{rows} > 0));
-
+ elsif ($attrs->{offset}) {
# MySQL actually recommends this approach. I cringe.
- $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset};
+ $attrs->{rows} = $sql_maker->__max_int;
}
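+  # e.g. (assuming the MySQL limit dialect) an offset-only resultset is
+  # rendered as "... LIMIT 18446744073709551615 OFFSET 3" - the idiom the
+  # MySQL manual itself suggests for "all rows starting at the offset"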
my @limit;
#limited has_many
( $attrs->{rows} && keys %{$attrs->{collapse}} )
||
- # limited prefetch with RNO subqueries
- (
- $attrs->{rows}
- &&
- $sql_maker->limit_dialect eq 'RowNumberOver'
- &&
- $attrs->{_prefetch_select}
- &&
- @{$attrs->{_prefetch_select}}
- )
- ||
- # grouped prefetch
+ # grouped prefetch (to satisfy group_by == select)
( $attrs->{group_by}
&&
@{$attrs->{group_by}}
&&
- $attrs->{_prefetch_select}
- &&
- @{$attrs->{_prefetch_select}}
+ $attrs->{_prefetch_selector_range}
)
) {
($ident, $select, $where, $attrs)
= $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs);
}
-
- elsif (
- ($attrs->{rows} || $attrs->{offset})
- &&
- $sql_maker->limit_dialect eq 'RowNumberOver'
- &&
- (ref $ident eq 'ARRAY' && @$ident > 1) # indicates a join
- &&
- scalar $self->_parse_order_by ($attrs->{order_by})
- ) {
- # the RNO limit dialect above mangles the SQL such that the join gets lost
- # wrap a subquery here
-
- push @limit, delete @{$attrs}{qw/rows offset/};
-
- my $subq = $self->_select_args_to_query (
- $ident,
- $select,
- $where,
- $attrs,
- );
-
- $ident = {
- -alias => $attrs->{alias},
- -source_handle => $ident->[0]{-source_handle},
- $attrs->{alias} => $subq,
- };
-
- # all part of the subquery now
- delete @{$attrs}{qw/order_by group_by having/};
- $where = undef;
- }
-
elsif (! $attrs->{software_limit} ) {
- push @limit, $attrs->{rows}, $attrs->{offset};
+ push @limit, (
+ $attrs->{rows} || (),
+ $attrs->{offset} || (),
+ );
}
# try to simplify the joinmap further (prune unreferenced type-single joins)
- $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+ if (
+ ref $ident
+ and
+ reftype $ident eq 'ARRAY'
+ and
+ @$ident != 1
+ ) {
+ $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+ }
###
# This would be the point to deflate anything found in $where
# invoked, and that's just bad...
###
- my $order = { map
- { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : () }
- (qw/order_by group_by having/ )
- };
-
- return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit);
+ return ('select', $ident, $select, $where, $attrs, @limit);
}
# Returns a counting SELECT for a simple count
return { count => '*' };
}
-# Returns a SELECT which will end up in the subselect
-# There may or may not be a group_by, as the subquery
-# might have been called to accomodate a limit
-#
-# Most databases would be happy with whatever ends up
-# here, but some choke in various ways.
-#
-sub _subq_count_select {
- my ($self, $source, $rs_attrs) = @_;
- return $rs_attrs->{group_by} if $rs_attrs->{group_by};
-
- my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns);
- return @pcols ? \@pcols : [ 1 ];
-}
-
sub source_bind_attributes {
- my ($self, $source) = @_;
-
- my $bind_attributes;
- foreach my $column ($source->columns) {
-
- my $data_type = $source->column_info($column)->{data_type} || '';
- $bind_attributes->{$column} = $self->bind_attribute_by_data_type($data_type)
- if $data_type;
- }
-
- return $bind_attributes;
+ shift->throw_exception(
+ 'source_bind_attributes() was never meant to be a callable public method - '
+ .'please contact the DBIC dev-team and describe your use case so that a reasonable '
+ .'solution can be provided'
+ ."\nhttp://search.cpan.org/dist/DBIx-Class/lib/DBIx/Class.pm#GETTING_HELP/SUPPORT"
+ );
}
=head2 select
return @row;
}
-=head2 sth
-
-=over 4
-
-=item Arguments: $sql
+=head2 sql_limit_dialect
-=back
-
-Returns a L<DBI> sth (statement handle) for the supplied SQL.
+This is an accessor for the default SQL limit dialect used by a particular
+storage driver. It can be overridden by supplying an explicit L</limit_dialect>
+to L<DBIx::Class::Schema/connect>. For a list of available limit dialects
+see L<DBIx::Class::SQLMaker::LimitDialects>.
=cut
# XXX You would think RaiseError would make this impossible,
# but apparently that's not true :(
- $self->throw_exception($dbh->errstr) if !$sth;
+ $self->throw_exception(
+ $dbh->errstr
+ ||
+ sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without "
+ .'an exception and/or setting $dbh->errstr',
+ length ($sql) > 20
+ ? substr($sql, 0, 20) . '...'
+ : $sql
+ ,
+ 'DBD::' . $dbh->{Driver}{Name},
+ )
+ ) if !$sth;
$sth;
}
sub sth {
+ carp_unique 'sth was mistakenly marked/documented as public, stop calling it (will be removed before DBIC v0.09)';
+ shift->_sth(@_);
+}
+
+sub _sth {
my ($self, $sql) = @_;
$self->dbh_do('_dbh_sth', $sql); # retry over disconnects
}
if ($dbh->can('column_info')) {
my %result;
- eval {
+ my $caught;
+ try {
my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
$sth->execute();
$result{$col_name} = \%column_info;
}
+ } catch {
+ $caught = 1;
};
- return \%result if !$@ && scalar keys %result;
+ return \%result if !$caught && scalar keys %result;
}
my %result;
sub _dbh_last_insert_id {
my ($self, $dbh, $source, $col) = @_;
- my $id = eval { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+ my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
return $id if defined $id;
}
# Check if placeholders are supported at all
-sub _placeholders_supported {
+sub _determine_supports_placeholders {
my $self = shift;
my $dbh = $self->_get_dbh;
# some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
# but it is inaccurate more often than not
- eval {
+ return try {
local $dbh->{PrintError} = 0;
local $dbh->{RaiseError} = 1;
$dbh->do('select ?', {}, 1);
+ 1;
+ }
+ catch {
+ 0;
};
- return $@ ? 0 : 1;
}
# Check if placeholders bound to non-string types throw exceptions
#
-sub _typeless_placeholders_supported {
+sub _determine_supports_typeless_placeholders {
my $self = shift;
my $dbh = $self->_get_dbh;
- eval {
+ return try {
local $dbh->{PrintError} = 0;
local $dbh->{RaiseError} = 1;
# this specifically tests a bind that is NOT a string
$dbh->do('select 1 where 1 = ?', {}, 1);
+ 1;
+ }
+ catch {
+ 0;
};
- return $@ ? 0 : 1;
}
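+# A storage subclass that knows the answer in advance can skip these probes
+# entirely via the capability accessors, e.g. (a hypothetical driver):
+#
+#   __PACKAGE__->_use_typeless_placeholders (0);  # never probe, never use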
=head2 sqlt_type
=cut
sub is_datatype_numeric {
- my ($self, $dt) = @_;
+ #my ($self, $dt) = @_;
- return 0 unless $dt;
+ return 0 unless $_[1];
- return $dt =~ /^ (?:
+ $_[1] =~ /^ (?:
numeric | int(?:eger)? | (?:tiny|small|medium|big)int | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
) $/ix;
}
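+# e.g. is_datatype_numeric() is true for 'bigint' and 'double precision',
+# but false for 'varchar' (non-numeric) and for an undefined data type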
sub create_ddl_dir {
my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
- if(!$dir || !-d $dir) {
+ unless ($dir) {
carp "No directory given, using ./\n";
- $dir = "./";
+ $dir = './';
+ } else {
+ -d $dir
+ or
+ (require File::Path and File::Path::make_path ("$dir")) # make_path does not like objects (e.g. Path::Class::Dir)
+ or
+ $self->throw_exception(
+ "Failed to create '$dir': " . ($! || $@ || 'error unknown')
+ );
}
+
+ $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
+
$databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
$databases = [ $databases ] if(ref($databases) ne 'ARRAY');
my $filename = $schema->ddl_filename($type, $version, $dir);
if(-f $filename)
{
+ # FIXME replace this block when a proper sane sql parser is available
my $file;
open($file, "<$filename")
or $self->throw_exception("Can't open $filename ($!)");
data => $schema,
);
- my @ret;
- my $wa = wantarray;
- if ($wa) {
- @ret = $tr->translate;
- }
- else {
- $ret[0] = $tr->translate;
- }
-
- $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
- unless (@ret && defined $ret[0]);
-
- return $wa ? @ret : $ret[0];
+ return preserve_context {
+ $tr->translate
+ } after => sub {
+ $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+ unless defined $_[0];
+ };
}
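+# Context::Preserve runs the block in the caller's context, hands the
+# return value(s) to the 'after' callback, then returns the block's
+# results - a minimal sketch of the pattern:
+#
+#   my @stmts = preserve_context { $tr->translate }    # list context kept
+#     after => sub { die 'boom' unless defined $_[0] };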
+# FIXME deploy() currently does not accurately report sql errors
+# It will always return true, while errors are merely warned about
sub deploy {
my ($self, $schema, $type, $sqltargs, $dir) = @_;
my $deploy = sub {
my $line = shift;
- return if($line =~ /^--/);
return if(!$line);
+ return if($line =~ /^--/);
# next if($line =~ /^DROP/m);
return if($line =~ /^BEGIN TRANSACTION/m);
return if($line =~ /^COMMIT/m);
return if $line =~ /^\s+$/; # skip whitespace only
$self->_query_start($line);
- eval {
+ try {
# do a dbh_do cycle here, as we need some error checking in
# place (even though we will ignore errors)
$self->dbh_do (sub { $_[1]->do($line) });
+ } catch {
+ carp qq{$_ (running "${line}")};
};
- if ($@) {
- carp qq{$@ (running "${line}")};
- }
$self->_query_end($line);
};
- my @statements = $self->deployment_statements($schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
+ my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } );
if (@statements > 1) {
foreach my $statement (@statements) {
$deploy->( $statement );
}
}
elsif (@statements == 1) {
- foreach my $line ( split(";\n", $statements[0])) {
+ # split on single-line comments and statement terminators
+ foreach my $line ( split(/\s*--.*\n|;\n/, $statements[0])) {
$deploy->( $line );
}
}
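+  # e.g. (illustrative) the split above turns
+  #   "CREATE TABLE artist (...);\n-- a comment\nCREATE INDEX foo ...;\n"
+  # into ( 'CREATE TABLE artist (...)', '', 'CREATE INDEX foo ...' ) - the
+  # empty piece left by the comment is then skipped by $deploy's checks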
=head2 datetime_parser_type
-Defines (returns) the datetime parser class - currently hardwired to
-L<DateTime::Format::MySQL>
-
-=cut
-
-sub datetime_parser_type { "DateTime::Format::MySQL"; }
+Defines the datetime parser class - currently defaults to L<DateTime::Format::MySQL>
=head2 build_datetime_parser
sub build_datetime_parser {
my $self = shift;
my $type = $self->datetime_parser_type(@_);
- $self->ensure_class_loaded ($type);
return $type;
}
This hook is to allow specific L<DBIx::Class::Storage> drivers to change the
way these aliases are named.
-The default behavior is C<"$relname_$join_count" if $join_count > 1>, otherwise
-C<"$relname">.
+The default behavior is C<< "$relname_$join_count" if $join_count > 1 >>,
+otherwise C<"$relname">.
=cut
return $alias;
}
-sub DESTROY {
- my $self = shift;
+# The size in bytes to use for DBI's ->bind_param_inout; this is the generic
+# version and it may be necessary to amend or override it for a specific storage
+# if such binds are necessary.
+sub _max_column_bytesize {
+ my ($self, $attr) = @_;
- $self->_verify_pid if $self->_dbh;
+ my $max_size;
- # some databases need this to stop spewing warnings
- if (my $dbh = $self->_dbh) {
- local $@;
- eval {
- %{ $dbh->{CachedKids} } = ();
- $dbh->disconnect;
- };
+ if ($attr->{sqlt_datatype}) {
+ my $data_type = lc($attr->{sqlt_datatype});
+
+ if ($attr->{sqlt_size}) {
+
+ # String/sized-binary types
+ if ($data_type =~ /^(?:
+ l? (?:var)? char(?:acter)? (?:\s*varying)?
+ |
+ (?:var)? binary (?:\s*varying)?
+ |
+ raw
+ )\b/x
+ ) {
+ $max_size = $attr->{sqlt_size};
+ }
+ # Other charset/unicode types - assume up to 4 bytes per character
+ elsif ($data_type =~ /^(?:
+ national \s* character (?:\s*varying)?
+ |
+ nchar
+ |
+ univarchar
+ |
+ nvarchar
+ )\b/x
+ ) {
+ $max_size = $attr->{sqlt_size} * 4;
+ }
+ }
+
+ if (!$max_size and !$self->_is_lob_type($data_type)) {
+ $max_size = 100 # for all other (numeric?) datatypes
+ }
}
- $self->_dbh(undef);
+ $max_size || $self->_dbic_connect_attributes->{LongReadLen} || $self->_get_dbh->{LongReadLen} || 8000;
+}
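+# e.g. (illustrative) a varchar(255) yields 255, an nvarchar(64) yields
+# 256 (4 bytes per character), an unsized numeric type falls back to 100,
+# and LOB types defer to LongReadLen (or ultimately the 8000 default)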
+
+# Determine if a data_type is some type of BLOB
+sub _is_lob_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i
+ || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary
+ |varchar|character\s*varying|nvarchar
+ |national\s*character\s*varying))?\z/xi);
+}
+
+sub _is_binary_lob_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($data_type =~ /blob|bfile|image|bytea/i
+ || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary))?\z/xi);
+}
+
+sub _is_text_lob_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($data_type =~ /^(?:clob|memo)\z/i
+ || $data_type =~ /^long(?:\s+(?:varchar|character\s*varying|nvarchar
+ |national\s*character\s*varying))\z/xi);
+}
+
+# Determine if a data_type is some type of a binary type
+sub _is_binary_type {
+ my ($self, $data_type) = @_;
+ $data_type && ($self->_is_binary_lob_type($data_type)
+ || $data_type =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i);
}
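+# Quick illustrations of the classifiers above:
+#
+#   _is_lob_type('longtext')       # true - contains 'text'
+#   _is_binary_lob_type('bytea')   # true
+#   _is_binary_type('varbinary')   # true, without being a LOB
+#   _is_lob_type('varchar')        # false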
1;
DBIx::Class can do some wonderful magic with handling exceptions,
disconnections, and transactions when you use C<< AutoCommit => 1 >>
-(the default) combined with C<txn_do> for transaction support.
+(the default) combined with L<txn_do|DBIx::Class::Storage/txn_do> for
+transaction support.
If you set C<< AutoCommit => 0 >> in your connect info, then you are always
in an assumed transaction between commits, and you're telling us you'd
be with raw DBI.
-=head1 AUTHORS
-
-Matt S. Trout <mst@shadowcatsystems.co.uk>
+=head1 AUTHOR AND CONTRIBUTORS
-Andy Grundman <andy@hybridized.org>
+See L<AUTHOR|DBIx::Class/AUTHOR> and L<CONTRIBUTORS|DBIx::Class/CONTRIBUTORS> in DBIx::Class
=head1 LICENSE