X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=d07bb76d34b533aa7329dff0db75462e7e0130e9;hb=ea95892eb6a71366db32b04137c7f2ee3b4ef841;hp=2939f4abafbba53ee8a234c726d93d3e038439e7;hpb=4f95e7c0757bc68b71c84a52c2430502772b873f;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 2939f4a..d07bb76 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -7,24 +7,29 @@ use warnings; use base qw/DBIx::Class::Storage::DBIHacks DBIx::Class::Storage/; use mro 'c3'; -use Carp::Clan qw/^DBIx::Class/; +use Carp::Clan qw/^DBIx::Class|^Try::Tiny/; use DBI; use DBIx::Class::Storage::DBI::Cursor; use DBIx::Class::Storage::Statistics; -use Scalar::Util(); -use List::Util(); -use Data::Dumper::Concise(); -use Sub::Name (); +use Scalar::Util qw/refaddr weaken reftype blessed/; +use Data::Dumper::Concise 'Dumper'; +use Sub::Name 'subname'; +use Try::Tiny; +use File::Path 'make_path'; +use namespace::clean; -# what version of sqlt do we require if deploy() without a ddl_dir is invoked -# when changing also adjust the corresponding author_require in Makefile.PL -my $minimum_sqlt_version = '0.11002'; +# default cursor class, overridable in connect_info attributes +__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); + +__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class sql_limit_dialect/); +__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker'); -__PACKAGE__->mk_group_accessors('simple' => - qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid - _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/ -); +__PACKAGE__->mk_group_accessors('simple' => qw/ + _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined + _dbh _dbh_details _conn_pid _conn_tid _sql_maker _sql_maker_opts + transaction_depth _dbh_autocommit savepoints +/); # the values for these accessors are picked out (and deleted) from # the attribute hashref passed to connect_info @@ -35,17 +40,38 @@ my @storage_options = qw/ __PACKAGE__->mk_group_accessors('simple' => @storage_options); -# default cursor class, overridable in connect_info attributes -__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor'); +# capability definitions, using a 2-tiered accessor system +# The rationale is: +# +# A driver/user may define _use_X, which blindly without any checks says: +# "(do not) use this capability", (use_dbms_capability is an "inherited" +# type accessor) +# +# If _use_X is undef, _supports_X is then queried. This is a "simple" style +# accessor, which in turn calls _determine_supports_X, and stores the return +# in a special slot on the storage object, which is wiped every time a $dbh +# reconnection takes place (it is not guaranteed that upon reconnection we +# will get the same rdbms version). _determine_supports_X does not need to +# exist on a driver, as we ->can for it before calling. 
-__PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/); -__PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks'); +my @capabilities = (qw/insert_returning placeholders typeless_placeholders join_optimizer/); +__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities ); +__PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities ) ); +# on by default, not strictly a capability (pending rewrite) +__PACKAGE__->_use_join_optimizer (1); +sub _determine_supports_join_optimizer { 1 }; # Each of these methods need _determine_driver called before itself # in order to function reliably. This is a purely DRY optimization +# +# get_(use)_dbms_capability need to be called on the correct Storage +# class, as _use_X may be hardcoded class-wide, and _supports_X calls +# _determine_supports_X which obv. needs a correct driver as well my @rdbms_specific_methods = qw/ + deployment_statements sqlt_type + sql_maker build_datetime_parser datetime_parser_type @@ -55,21 +81,33 @@ my @rdbms_specific_methods = qw/ delete select select_single + + get_use_dbms_capability + get_dbms_capability + + _server_info + _get_server_version /; for my $meth (@rdbms_specific_methods) { my $orig = __PACKAGE__->can ($meth) - or next; + or die "$meth is not a ::Storage::DBI method!"; no strict qw/refs/; no warnings qw/redefine/; - *{__PACKAGE__ ."::$meth"} = Sub::Name::subname $meth => sub { - if (not $_[0]->_driver_determined) { + *{__PACKAGE__ ."::$meth"} = subname $meth => sub { + if (not $_[0]->_driver_determined and not $_[0]->{_in_determine_driver}) { $_[0]->_determine_driver; - goto $_[0]->can($meth); + + # This for some reason crashes and burns on perl 5.8.1 + # IFF the method ends up throwing an exception + #goto $_[0]->can ($meth); + + my $cref = $_[0]->can ($meth); + goto $cref; } - $orig->(@_); + goto $orig; }; } @@ -93,7 +131,7 @@ DBIx::Class::Storage::DBI - DBI storage handler ); $schema->resultset('Book')->search({ - written_on => $schema->storage->datetime_parser(DateTime->now) + written_on => $schema->storage->datetime_parser->format_datetime(DateTime->now) }); =head1 DESCRIPTION @@ -111,13 +149,97 @@ sub new { $new->transaction_depth(0); $new->_sql_maker_opts({}); + $new->_dbh_details({}); $new->{savepoints} = []; $new->{_in_dbh_do} = 0; $new->{_dbh_gen} = 0; + # read below to see what this does + $new->_arm_global_destructor; + $new; } +# This is hack to work around perl shooting stuff in random +# order on exit(). If we do not walk the remaining storage +# objects in an END block, there is a *small but real* chance +# of a fork()ed child to kill the parent's shared DBI handle, +# *before perl reaches the DESTROY in this package* +# Yes, it is ugly and effective. 
+{ + my %seek_and_destroy; + + sub _arm_global_destructor { + my $self = shift; + my $key = Scalar::Util::refaddr ($self); + $seek_and_destroy{$key} = $self; + Scalar::Util::weaken ($seek_and_destroy{$key}); + } + + END { + local $?; # just in case the DBI destructor changes it somehow + + # destroy just the object if not native to this process/thread + $_->_preserve_foreign_dbh for (grep + { defined $_ } + values %seek_and_destroy + ); + } +} + +sub DESTROY { + my $self = shift; + + # some databases spew warnings on implicit disconnect + local $SIG{__WARN__} = sub {}; + $self->_dbh(undef); +} + +sub _preserve_foreign_dbh { + my $self = shift; + + return unless $self->_dbh; + + $self->_verify_tid; + + return unless $self->_dbh; + + $self->_verify_pid; + +} + +# handle pid changes correctly - do not destroy parent's connection +sub _verify_pid { + my $self = shift; + + return if ( defined $self->_conn_pid and $self->_conn_pid == $$ ); + + $self->_dbh->{InactiveDestroy} = 1; + $self->_dbh(undef); + $self->{_dbh_gen}++; + + return; +} + +# very similar to above, but seems to FAIL if I set InactiveDestroy +sub _verify_tid { + my $self = shift; + + if ( ! defined $self->_conn_tid ) { + return; # no threads + } + elsif ( $self->_conn_tid == threads->tid ) { + return; # same thread + } + + #$self->_dbh->{InactiveDestroy} = 1; # why does t/51threads.t fail...? + $self->_dbh(undef); + $self->{_dbh_gen}++; + + return; +} + + =head2 connect_info This method is normally called by L, which @@ -195,7 +317,7 @@ for most DBDs. See L for details. In addition to the standard L L attributes, DBIx::Class recognizes the following connection options. These options can be mixed in with your other -L connection attributes, or placed in a seperate hashref +L connection attributes, or placed in a separate hashref (C<\%extra_attributes>) as shown above. Every time C is invoked, any previous settings for @@ -328,14 +450,13 @@ statement handles via L. =item limit_dialect -Sets the limit dialect. This is useful for JDBC-bridge among others -where the remote SQL-dialect cannot be determined by the name of the -driver alone. See also L. +Sets a specific SQL::Abstract::Limit-style limit dialect, overriding the +default L setting of the storage (if any). For a list +of available limit dialects see L. =item quote_char -Specifies what characters to use to quote table and column names. If -you use this you will want to specify L as well. +Specifies what characters to use to quote table and column names. C expects either a single character, in which case is it is placed on either side of the table/column name, or an arrayref of length @@ -346,14 +467,9 @@ SQL Server you should use C<< quote_char => [qw/[ ]/] >>. =item name_sep -This only needs to be used in conjunction with C, and is used to -specify the charecter that seperates elements (schemas, tables, columns) from -each other. In most cases this is simply a C<.>. - -The consequences of not supplying this value is that L -will assume DBIx::Class' uses of aliases to be complete column -names. The output will look like I<"me.name"> when it should actually -be I<"me"."name">. +This parameter is only useful in conjunction with C, and is used to +specify the character that separates elements (schemas, tables, columns) from +each other. If unspecified it defaults to the most commonly used C<.>. 
=item unsafe @@ -406,7 +522,7 @@ L 'postgres', 'my_pg_password', { AutoCommit => 1 }, - { quote_char => q{"}, name_sep => q{.} }, + { quote_char => q{"} }, ] ); @@ -451,13 +567,55 @@ L =cut sub connect_info { - my ($self, $info_arg) = @_; + my ($self, $info) = @_; - return $self->_connect_info if !$info_arg; + return $self->_connect_info if !$info; - my @args = @$info_arg; # take a shallow copy for further mutilation - $self->_connect_info([@args]); # copy for _connect_info + $self->_connect_info($info); # copy for _connect_info + + $info = $self->_normalize_connect_info($info) + if ref $info eq 'ARRAY'; + + for my $storage_opt (keys %{ $info->{storage_options} }) { + my $value = $info->{storage_options}{$storage_opt}; + + $self->$storage_opt($value); + } + + # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only + # the new set of options + $self->_sql_maker(undef); + $self->_sql_maker_opts({}); + + for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) { + my $value = $info->{sql_maker_options}{$sql_maker_opt}; + + $self->_sql_maker_opts->{$sql_maker_opt} = $value; + } + my %attrs = ( + %{ $self->_default_dbi_connect_attributes || {} }, + %{ $info->{attributes} || {} }, + ); + + my @args = @{ $info->{arguments} }; + + $self->_dbi_connect_info([@args, + %attrs && !(ref $args[0] eq 'CODE') ? \%attrs : ()]); + + # FIXME - dirty: + # save attributes them in a separate accessor so they are always + # introspectable, even in case of a CODE $dbhmaker + $self->_dbic_connect_attributes (\%attrs); + + return $self->_connect_info; +} + +sub _normalize_connect_info { + my ($self, $info_arg) = @_; + my %info; + + my @args = @$info_arg; # take a shallow copy for further mutilation # combine/pre-parse arguments depending on invocation style @@ -494,36 +652,23 @@ sub connect_info { @args = @args[0,1,2]; } - # Kill sql_maker/_sql_maker_opts, so we get a fresh one with only - # the new set of options - $self->_sql_maker(undef); - $self->_sql_maker_opts({}); + $info{arguments} = \@args; - if(keys %attrs) { - for my $storage_opt (@storage_options, 'cursor_class') { # @storage_options is declared at the top of the module - if(my $value = delete $attrs{$storage_opt}) { - $self->$storage_opt($value); - } - } - for my $sql_maker_opt (qw/limit_dialect quote_char name_sep/) { - if(my $opt_val = delete $attrs{$sql_maker_opt}) { - $self->_sql_maker_opts->{$sql_maker_opt} = $opt_val; - } - } - } + my @storage_opts = grep exists $attrs{$_}, + @storage_options, 'cursor_class'; - if (ref $args[0] eq 'CODE') { - # _connect() never looks past $args[0] in this case - %attrs = () - } else { - %attrs = ( - %{ $self->_default_dbi_connect_attributes || {} }, - %attrs, - ); - } + @{ $info{storage_options} }{@storage_opts} = + delete @attrs{@storage_opts} if @storage_opts; + + my @sql_maker_opts = grep exists $attrs{$_}, + qw/limit_dialect quote_char name_sep/; + + @{ $info{sql_maker_options} }{@sql_maker_opts} = + delete @attrs{@sql_maker_opts} if @sql_maker_opts; - $self->_dbi_connect_info([@args, keys %attrs ? 
\%attrs : ()]); - $self->_connect_info; + $info{attributes} = \%attrs if %attrs; + + return \%info; } sub _default_dbi_connect_attributes { @@ -604,39 +749,26 @@ sub dbh_do { my $dbh = $self->_get_dbh; - return $self->$code($dbh, @_) if $self->{_in_dbh_do} - || $self->{transaction_depth}; + return $self->$code($dbh, @_) + if ( $self->{_in_dbh_do} || $self->{transaction_depth} ); local $self->{_in_dbh_do} = 1; - my @result; - my $want_array = wantarray; + # take a ref instead of a copy, to preserve coderef @_ aliasing semantics + my $args = \@_; + return try { + $self->$code ($dbh, @$args); + } catch { + $self->throw_exception($_) if $self->connected; - eval { + # We were not connected - reconnect and retry, but let any + # exception fall right through this time + carp "Retrying $code after catching disconnected exception: $_" + if $ENV{DBIC_DBIRETRY_DEBUG}; - if($want_array) { - @result = $self->$code($dbh, @_); - } - elsif(defined $want_array) { - $result[0] = $self->$code($dbh, @_); - } - else { - $self->$code($dbh, @_); - } + $self->_populate_dbh; + $self->$code($self->_dbh, @$args); }; - - # ->connected might unset $@ - copy - my $exception = $@; - if(!$exception) { return $want_array ? @result : $result[0] } - - $self->throw_exception($exception) if $self->connected; - - # We were not connected - reconnect and retry, but let any - # exception fall right through this time - carp "Retrying $code after catching disconnected exception: $exception" - if $ENV{DBIC_DBIRETRY_DEBUG}; - $self->_populate_dbh; - $self->$code($self->_dbh, @_); } # This is basically a blend of dbh_do above and DBIx::Class::Storage::txn_do. @@ -649,8 +781,6 @@ sub txn_do { ref $coderef eq 'CODE' or $self->throw_exception ('$coderef must be a CODE reference'); - return $coderef->(@_) if $self->{transaction_depth} && ! $self->auto_savepoint; - local $self->{_in_dbh_do} = 1; my @result; @@ -658,30 +788,42 @@ sub txn_do { my $tried = 0; while(1) { - eval { - $self->_get_dbh; + my $exception; + # take a ref instead of a copy, to preserve coderef @_ aliasing semantics + my $args = \@_; + + try { $self->txn_begin; + my $txn_start_depth = $self->transaction_depth; if($want_array) { - @result = $coderef->(@_); + @result = $coderef->(@$args); } elsif(defined $want_array) { - $result[0] = $coderef->(@_); + $result[0] = $coderef->(@$args); } else { - $coderef->(@_); + $coderef->(@$args); + } + + my $delta_txn = $txn_start_depth - $self->transaction_depth; + if ($delta_txn == 0) { + $self->txn_commit; + } + elsif ($delta_txn != 1) { + # an off-by-one would mean we fired a rollback + carp "Unexpected reduction of transaction depth by $delta_txn after execution of $coderef"; } - $self->txn_commit; + } catch { + $exception = $_; }; - # ->connected might unset $@ - copy - my $exception = $@; - if(!$exception) { return $want_array ? @result : $result[0] } + if(! defined $exception) { return $want_array ? 
@result : $result[0] } - if($tried++ || $self->connected) { - eval { $self->txn_rollback }; - my $rollback_exception = $@; - if($rollback_exception) { + if($self->transaction_depth > 1 || $tried++ || $self->connected) { + my $rollback_exception; + try { $self->txn_rollback } catch { $rollback_exception = shift }; + if(defined $rollback_exception) { my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; $self->throw_exception($exception) # propagate nested rollback if $rollback_exception =~ /$exception_class/; @@ -697,7 +839,7 @@ sub txn_do { # We were not connected, and was first try - reconnect and retry # via the while loop carp "Retrying $coderef after catching disconnected exception: $exception" - if $ENV{DBIC_DBIRETRY_DEBUG}; + if $ENV{DBIC_TXNRETRY_DEBUG}; $self->_populate_dbh; } } @@ -722,6 +864,7 @@ sub disconnect { $self->_dbh_rollback unless $self->_dbh_autocommit; + %{ $self->_dbh->{CachedKids} } = (); $self->_dbh->disconnect; $self->_dbh(undef); $self->{_dbh_gen}++; @@ -759,8 +902,8 @@ sub with_deferred_fk_checks { =back -Verifies that the the current database handle is active and ready to execute -an SQL statement (i.e. the connection did not get stale, server is still +Verifies that the current database handle is active and ready to execute +an SQL statement (e.g. the connection did not get stale, server is still answering, etc.) This method is used internally by L. =cut @@ -778,19 +921,11 @@ sub connected { sub _seems_connected { my $self = shift; + $self->_preserve_foreign_dbh; + my $dbh = $self->_dbh or return 0; - if(defined $self->_conn_tid && $self->_conn_tid != threads->tid) { - $self->_dbh(undef); - $self->{_dbh_gen}++; - return 0; - } - else { - $self->_verify_pid; - return 0 if !$self->_dbh; - } - return $dbh->FETCH('Active'); } @@ -802,20 +937,6 @@ sub _ping { return $dbh->ping; } -# handle pid changes correctly -# NOTE: assumes $self->_dbh is a valid $dbh -sub _verify_pid { - my ($self) = @_; - - return if defined $self->_conn_pid && $self->_conn_pid == $$; - - $self->_dbh->{InactiveDestroy} = 1; - $self->_dbh(undef); - $self->{_dbh_gen}++; - - return; -} - sub ensure_connected { my ($self) = @_; @@ -829,7 +950,7 @@ sub ensure_connected { Returns a C<$dbh> - a data base handle of class L. The returned handle is guaranteed to be healthy by implicitly calling L, and if necessary performing a reconnection before returning. Keep in mind that this -is very B on some database engines. Consider using L +is very B on some database engines. Consider using L instead. =cut @@ -848,28 +969,43 @@ sub dbh { # this is the internal "get dbh or connect (don't check)" method sub _get_dbh { my $self = shift; - $self->_verify_pid if $self->_dbh; + $self->_preserve_foreign_dbh; $self->_populate_dbh unless $self->_dbh; return $self->_dbh; } -sub _sql_maker_args { - my ($self) = @_; - - return ( - bindtype=>'columns', - array_datatypes => 1, - limit_dialect => $self->_get_dbh, - %{$self->_sql_maker_opts} - ); -} - sub sql_maker { my ($self) = @_; unless ($self->_sql_maker) { my $sql_maker_class = $self->sql_maker_class; $self->ensure_class_loaded ($sql_maker_class); - $self->_sql_maker($sql_maker_class->new( $self->_sql_maker_args )); + + my %opts = %{$self->_sql_maker_opts||{}}; + my $dialect = + $opts{limit_dialect} + || + $self->sql_limit_dialect + || + do { + my $s_class = (ref $self) || $self; + carp ( + "Your storage class ($s_class) does not set sql_limit_dialect and you " + . 'have not supplied an explicit limit_dialect in your connection_info. ' + . 
'DBIC will attempt to use the GenericSubQ dialect, which works on most ' + . 'databases but can be (and often is) painfully slow.' + ); + + 'GenericSubQ'; + } + ; + + $self->_sql_maker($sql_maker_class->new( + bindtype=>'columns', + array_datatypes => 1, + limit_dialect => $dialect, + name_sep => '.', + %opts, + )); } return $self->_sql_maker; } @@ -883,6 +1019,8 @@ sub _populate_dbh { my @info = @{$self->_dbi_connect_info || []}; $self->_dbh(undef); # in case ->connected failed we might get sent here + $self->_dbh_details({}); # reset everything we know + $self->_dbh($self->_connect(@info)); $self->_conn_pid($$); @@ -907,6 +1045,88 @@ sub _run_connection_actions { $self->_do_connection_actions(connect_call_ => $_) for @actions; } + + +sub set_use_dbms_capability { + $_[0]->set_inherited ($_[1], $_[2]); +} + +sub get_use_dbms_capability { + my ($self, $capname) = @_; + + my $use = $self->get_inherited ($capname); + return defined $use + ? $use + : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) } + ; +} + +sub set_dbms_capability { + $_[0]->_dbh_details->{capability}{$_[1]} = $_[2]; +} + +sub get_dbms_capability { + my ($self, $capname) = @_; + + my $cap = $self->_dbh_details->{capability}{$capname}; + + unless (defined $cap) { + if (my $meth = $self->can ("_determine$capname")) { + $cap = $self->$meth ? 1 : 0; + } + else { + $cap = 0; + } + + $self->set_dbms_capability ($capname, $cap); + } + + return $cap; +} + +sub _server_info { + my $self = shift; + + my $info; + unless ($info = $self->_dbh_details->{info}) { + + $info = {}; + + my $server_version = try { $self->_get_server_version }; + + if (defined $server_version) { + $info->{dbms_version} = $server_version; + + my ($numeric_version) = $server_version =~ /^([\d\.]+)/; + my @verparts = split (/\./, $numeric_version); + if ( + @verparts + && + $verparts[0] <= 999 + ) { + # consider only up to 3 version parts, iff not more than 3 digits + my @use_parts; + while (@verparts && @use_parts < 3) { + my $p = shift @verparts; + last if $p > 999; + push @use_parts, $p; + } + push @use_parts, 0 while @use_parts < 3; + + $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts; + } + } + + $self->_dbh_details->{info} = $info; + } + + return $info; +} + +sub _get_server_version { + shift->_get_dbh->get_info(18); +} + sub _determine_driver { my ($self) = @_; @@ -922,22 +1142,27 @@ sub _determine_driver { } else { # if connect_info is a CODEREF, we have no choice but to connect if (ref $self->_dbi_connect_info->[0] && - Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') { + reftype $self->_dbi_connect_info->[0] eq 'CODE') { $self->_populate_dbh; $driver = $self->_dbh->{Driver}{Name}; } else { # try to use dsn to not require being connected, the driver may still # force a connection in _rebless to determine version - ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; + # (dsn may not be supplied at all if all we do is make a mock-schema) + my $dsn = $self->_dbi_connect_info->[0] || $ENV{DBI_DSN} || ''; + ($driver) = $dsn =~ /dbi:([^:]+):/i; + $driver ||= $ENV{DBI_DRIVER}; } } - my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; - if ($self->load_optional_class($storage_class)) { - mro::set_mro($storage_class, 'c3'); - bless $self, $storage_class; - $self->_rebless(); + if ($driver) { + my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; + if ($self->load_optional_class($storage_class)) { + mro::set_mro($storage_class, 'c3'); + bless $self, $storage_class; + 
$self->_rebless(); + } } } @@ -1024,40 +1249,52 @@ sub _connect { $DBI::connect_via = 'connect'; } - eval { + try { if(ref $info[0] eq 'CODE') { - $dbh = &{$info[0]} + $dbh = $info[0]->(); } else { $dbh = DBI->connect(@info); } - if($dbh && !$self->unsafe) { - my $weak_self = $self; - Scalar::Util::weaken($weak_self); - $dbh->{HandleError} = sub { + if (!$dbh) { + die $DBI::errstr; + } + + unless ($self->unsafe) { + + # this odd anonymous coderef dereference is in fact really + # necessary to avoid the unwanted effect described in perl5 + # RT#75792 + sub { + my $weak_self = $_[0]; + weaken $weak_self; + + $_[1]->{HandleError} = sub { if ($weak_self) { $weak_self->throw_exception("DBI Exception: $_[0]"); } else { # the handler may be invoked by something totally out of # the scope of DBIC - croak ("DBI Exception: $_[0]"); + croak ("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]"); } - }; + }; + }->($self, $dbh); + $dbh->{ShowErrorStatement} = 1; $dbh->{RaiseError} = 1; $dbh->{PrintError} = 0; } + } + catch { + $self->throw_exception("DBI Connection failed: $_") + } + finally { + $DBI::connect_via = $old_connect_via if $old_connect_via; }; - $DBI::connect_via = $old_connect_via if $old_connect_via; - - $self->throw_exception("DBI Connection failed: " . ($@||$DBI::errstr)) - if !$dbh || $@; - $self->_dbh_autocommit($dbh->{AutoCommit}); - $dbh; } @@ -1141,14 +1378,27 @@ sub svp_rollback { } sub _svp_generate_name { - my ($self) = @_; - - return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); + my ($self) = @_; + return 'savepoint_'.scalar(@{ $self->{'savepoints'} }); } sub txn_begin { my $self = shift; - if($self->{transaction_depth} == 0) { + + # this means we have not yet connected and do not know the AC status + # (e.g. coderef $dbh) + if (! defined $self->_dbh_autocommit) { + $self->ensure_connected; + } + # otherwise re-connect on pid changes, so + # that the txn_depth is adjusted properly + # the lightweight _get_dbh is good enoug here + # (only superficial handle check, no pings) + else { + $self->_get_dbh; + } + + if($self->transaction_depth == 0) { $self->debugobj->txn_begin() if $self->debug; $self->_dbh_begin_work; @@ -1188,6 +1438,9 @@ sub txn_commit { $self->svp_release if $self->auto_savepoint; } + else { + $self->throw_exception( 'Refusing to commit without a started transaction' ); + } } sub _dbh_commit { @@ -1200,7 +1453,7 @@ sub _dbh_commit { sub txn_rollback { my $self = shift; my $dbh = $self->_dbh; - eval { + try { if ($self->{transaction_depth} == 1) { $self->debugobj->txn_rollback() if ($self->debug); @@ -1218,15 +1471,17 @@ sub txn_rollback { else { die DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION->new; } - }; - if ($@) { - my $error = $@; - my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; - $error =~ /$exception_class/ and $self->throw_exception($error); - # ensure that a failed rollback resets the transaction depth - $self->{transaction_depth} = $self->_dbh_autocommit ? 0 : 1; - $self->throw_exception($error); } + catch { + my $exception_class = "DBIx::Class::Storage::NESTED_ROLLBACK_EXCEPTION"; + + if ($_ !~ /$exception_class/) { + # ensure that a failed rollback resets the transaction depth + $self->{transaction_depth} = $self->_dbh_autocommit ? 
0 : 1; + } + + $self->throw_exception($_) + }; } sub _dbh_rollback { @@ -1242,7 +1497,7 @@ sub _dbh_rollback { sub _prep_for_execute { my ($self, $op, $extra_bind, $ident, $args) = @_; - if( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { + if( blessed $ident && $ident->isa("DBIx::Class::ResultSource") ) { $ident = $ident->from(); } @@ -1321,7 +1576,9 @@ sub _dbh_execute { # Can this fail without throwing an exception anyways??? my $rv = $sth->execute(); - $self->throw_exception($sth->errstr) if !$rv; + $self->throw_exception( + $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...' + ) if !$rv; $self->_query_end( $sql, @$bind ); @@ -1333,34 +1590,60 @@ sub _execute { $self->dbh_do('_dbh_execute', @_); # retry over disconnects } -sub insert { +sub _prefetch_insert_auto_nextvals { my ($self, $source, $to_insert) = @_; - my $ident = $source->from; - my $bind_attributes = $self->source_bind_attributes($source); - - my $updated_cols = {}; + my $upd = {}; foreach my $col ( $source->columns ) { if ( !defined $to_insert->{$col} ) { my $col_info = $source->column_info($col); if ( $col_info->{auto_nextval} ) { - $updated_cols->{$col} = $to_insert->{$col} = $self->_sequence_fetch( + $upd->{$col} = $to_insert->{$col} = $self->_sequence_fetch( 'nextval', - $col_info->{sequence} || - $self->_dbh_get_autoinc_seq($self->_get_dbh, $source) + $col_info->{sequence} ||= + $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col) ); } } } - $self->_execute('insert' => [], $source, $bind_attributes, $to_insert); + return $upd; +} + +sub insert { + my $self = shift; + my ($source, $to_insert, $opts) = @_; + + my $updated_cols = $self->_prefetch_insert_auto_nextvals (@_); + + my $bind_attributes = $self->source_bind_attributes($source); + + my ($rv, $sth) = $self->_execute('insert' => [], $source, $bind_attributes, $to_insert, $opts); + + if ($opts->{returning}) { + my @ret_cols = @{$opts->{returning}}; + + my @ret_vals = try { + local $SIG{__WARN__} = sub {}; + my @r = $sth->fetchrow_array; + $sth->finish; + @r; + }; + + my %ret; + @ret{@ret_cols} = @ret_vals if (@ret_vals); + + $updated_cols = { + %$updated_cols, + %ret, + }; + } return $updated_cols; } -## Still not quite perfect, and EXPERIMENTAL ## Currently it is assumed that all values passed will be "normal", i.e. not ## scalar refs, or at least, all the same type as the first set, the statement is ## only prepped once. @@ -1385,9 +1668,9 @@ sub insert_bulk { $cols->[$col_idx], do { local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any - Data::Dumper::Concise::Dumper({ + Dumper { map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols) - }), + }, } ); }; @@ -1440,9 +1723,13 @@ sub insert_bulk { ); } - $self->_query_start( $sql, ['__BULK__'] ); - my $sth = $self->sth($sql); + # neither _execute_array, nor _execute_inserts_with_no_binds are + # atomic (even if _execute _array is a single call). Thus a safety + # scope guard + my $guard = $self->txn_scope_guard; + $self->_query_start( $sql, [ dummy => '__BULK_INSERT__' ] ); + my $sth = $self->sth($sql); my $rv = do { if ($empty_bind) { # bind_param_array doesn't work if there are no binds @@ -1454,7 +1741,9 @@ sub insert_bulk { } }; - $self->_query_end( $sql, ['__BULK__'] ); + $self->_query_end( $sql, [ dummy => '__BULK_INSERT__' ] ); + + $guard->commit; return (wantarray ? 
($rv, $sth, @bind) : $rv); } @@ -1462,8 +1751,6 @@ sub insert_bulk { sub _execute_array { my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_; - my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0; - ## This must be an arrayref, else nothing works! my $tuple_status = []; @@ -1485,20 +1772,34 @@ sub _execute_array { my @data = map { $_->[$data_index] } @$data; - $sth->bind_param_array( $placeholder_index, [@data], $attributes ); + $sth->bind_param_array( + $placeholder_index, + [@data], + (%$attributes ? $attributes : ()), + ); $placeholder_index++; } - my $rv = eval { - $self->_dbh_execute_array($sth, $tuple_status, @extra); + my ($rv, $err); + try { + $rv = $self->_dbh_execute_array($sth, $tuple_status, @extra); + } + catch { + $err = shift; + }; + + # Statement must finish even if there was an exception. + try { + $sth->finish + } + catch { + $err = shift unless defined $err }; - my $err = $@ || $sth->errstr; -# Statement must finish even if there was an exception. - eval { $sth->finish }; - $err = $@ unless $err; + $err = $sth->errstr + if (! defined $err and $sth->err); - if ($err) { + if (defined $err) { my $i = 0; ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i]; @@ -1507,14 +1808,10 @@ sub _execute_array { $self->throw_exception(sprintf "%s for populate slice:\n%s", ($tuple_status->[$i][1] || $err), - Data::Dumper::Concise::Dumper({ - map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) - }), + Dumper { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) }, ); } - $guard->commit if $guard; - return $rv; } @@ -1527,30 +1824,34 @@ sub _dbh_execute_array { sub _dbh_execute_inserts_with_no_binds { my ($self, $sth, $count) = @_; - my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0; - - eval { + my $err; + try { my $dbh = $self->_get_dbh; local $dbh->{RaiseError} = 1; local $dbh->{PrintError} = 0; $sth->execute foreach 1..$count; + } + catch { + $err = shift; + } + finally { + # Make sure statement is finished even if there was an exception. + try { + $sth->finish + } + catch { + $err = shift unless defined $err; + }; }; - my $exception = $@; - -# Make sure statement is finished even if there was an exception. - eval { $sth->finish }; - $exception = $@ unless $exception; - $self->throw_exception($exception) if $exception; - - $guard->commit if $guard; + $self->throw_exception($err) if defined $err; return $count; } sub update { - my ($self, $source, @args) = @_; + my ($self, $source, @args) = @_; my $bind_attrs = $self->source_bind_attributes($source); @@ -1580,15 +1881,7 @@ sub _subq_update_delete { my $rsrc = $rs->result_source; # quick check if we got a sane rs on our hands - my @pcols = $rsrc->primary_columns; - unless (@pcols) { - $self->throw_exception ( - sprintf ( - "You must declare primary key(s) on source '%s' (via set_primary_key) in order to update or delete complex resultsets", - $rsrc->source_name || $rsrc->from - ) - ); - } + my @pcols = $rsrc->_pri_cols; my $sel = $rs->_resolved_attrs->{select}; $sel = [ $sel ] unless ref $sel eq 'ARRAY'; @@ -1641,7 +1934,7 @@ sub _per_row_update_delete { my ($rs, $op, $values) = @_; my $rsrc = $rs->result_source; - my @pcols = $rsrc->primary_columns; + my @pcols = $rsrc->_pri_cols; my $guard = $self->txn_scope_guard; @@ -1649,11 +1942,12 @@ sub _per_row_update_delete { my $row_cnt = '0E0'; my $subrs_cur = $rs->cursor; - while (my @pks = $subrs_cur->next) { + my @all_pk = $subrs_cur->all; + for my $pks ( @all_pk) { my $cond; for my $i (0.. 
$#pcols) { - $cond->{$pcols[$i]} = $pks[$i]; + $cond->{$pcols[$i]} = $pks->[$i]; } $self->$op ( @@ -1672,31 +1966,18 @@ sub _per_row_update_delete { sub _select { my $self = shift; - - # localization is neccessary as - # 1) there is no infrastructure to pass this around before SQLA2 - # 2) _select_args sets it and _prep_for_execute consumes it - my $sql_maker = $self->sql_maker; - local $sql_maker->{_dbic_rs_attrs}; - - return $self->_execute($self->_select_args(@_)); + $self->_execute($self->_select_args(@_)); } sub _select_args_to_query { my $self = shift; - # localization is neccessary as - # 1) there is no infrastructure to pass this around before SQLA2 - # 2) _select_args sets it and _prep_for_execute consumes it - my $sql_maker = $self->sql_maker; - local $sql_maker->{_dbic_rs_attrs}; - - # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset) + # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $rs_attrs, $rows, $offset) # = $self->_select_args($ident, $select, $cond, $attrs); my ($op, $bind, $ident, $bind_attrs, @args) = $self->_select_args(@_); - # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $order, $rows, $offset ]); + # my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, [ $select, $cond, $rs_attrs, $rows, $offset ]); my ($sql, $prepared_bind) = $self->_prep_for_execute($op, $bind, $ident, \@args); $prepared_bind ||= []; @@ -1709,16 +1990,16 @@ sub _select_args_to_query { sub _select_args { my ($self, $ident, $select, $where, $attrs) = @_; + my $sql_maker = $self->sql_maker; my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident); - my $sql_maker = $self->sql_maker; - $sql_maker->{_dbic_rs_attrs} = { + $attrs = { %$attrs, select => $select, from => $ident, where => $where, - $rs_alias - ? ( _source_handle => $alias2source->{$rs_alias}->handle ) + $rs_alias && $alias2source->{$rs_alias} + ? ( _rsroot_source_handle => $alias2source->{$rs_alias}->handle ) : () , }; @@ -1750,29 +2031,32 @@ sub _select_args { } } - # adjust limits - if ( - $attrs->{software_limit} - || - $sql_maker->_default_limit_syntax eq "GenericSubQ" - ) { - $attrs->{software_limit} = 1; + # Sanity check the attributes (SQLMaker does it too, but + # in case of a software_limit we'll never reach there) + if (defined $attrs->{offset}) { + $self->throw_exception('A supplied offset attribute must be a non-negative integer') + if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 ); } - else { - $self->throw_exception("rows attribute must be positive if present") - if (defined($attrs->{rows}) && !($attrs->{rows} > 0)); + $attrs->{offset} ||= 0; + if (defined $attrs->{rows}) { + $self->throw_exception("The rows attribute must be a positive integer if present") + if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 ); + } + elsif ($attrs->{offset}) { # MySQL actually recommends this approach. I cringe. 
- $attrs->{rows} = 2**48 if not defined $attrs->{rows} and defined $attrs->{offset}; + $attrs->{rows} = $sql_maker->__max_int; } my @limit; - # see if we need to tear the prefetch apart (either limited has_many or grouped prefetch) - # otherwise delegate the limiting to the storage, unless software limit was requested + # see if we need to tear the prefetch apart otherwise delegate the limiting to the + # storage, unless software limit was requested if ( + #limited has_many ( $attrs->{rows} && keys %{$attrs->{collapse}} ) || + # grouped prefetch (to satisfy group_by == select) ( $attrs->{group_by} && @{$attrs->{group_by}} @@ -1782,47 +2066,16 @@ sub _select_args { @{$attrs->{_prefetch_select}} ) ) { - ($ident, $select, $where, $attrs) = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs); } - - elsif ( - ($attrs->{rows} || $attrs->{offset}) - && - $sql_maker->limit_dialect eq 'RowNumberOver' - && - (ref $ident eq 'ARRAY' && @$ident > 1) # indicates a join - && - scalar $sql_maker->_order_by_chunks ($attrs->{order_by}) - ) { - # the RNO limit dialect above mangles the SQL such that the join gets lost - # wrap a subquery here - - push @limit, delete @{$attrs}{qw/rows offset/}; - - my $subq = $self->_select_args_to_query ( - $ident, - $select, - $where, - $attrs, - ); - - $ident = { - -alias => $attrs->{alias}, - -source_handle => $ident->[0]{-source_handle}, - $attrs->{alias} => $subq, - }; - - # all part of the subquery now - delete @{$attrs}{qw/order_by group_by having/}; - $where = undef; - } - elsif (! $attrs->{software_limit} ) { push @limit, $attrs->{rows}, $attrs->{offset}; } + # try to simplify the joinmap further (prune unreferenced type-single joins) + $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs); + ### # This would be the point to deflate anything found in $where # (and leave $attrs->{bind} intact). Problem is - inflators historically @@ -1833,12 +2086,7 @@ sub _select_args { # invoked, and that's just bad... ### - my $order = { map - { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : () } - (qw/order_by group_by having/ ) - }; - - return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit); + return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $attrs, @limit); } # Returns a counting SELECT for a simple count @@ -1850,20 +2098,6 @@ sub _count_select { return { count => '*' }; } -# Returns a SELECT which will end up in the subselect -# There may or may not be a group_by, as the subquery -# might have been called to accomodate a limit -# -# Most databases would be happy with whatever ends up -# here, but some choke in various ways. -# -sub _subq_count_select { - my ($self, $source, $rs_attrs) = @_; - return $rs_attrs->{group_by} if $rs_attrs->{group_by}; - - my @pcols = map { join '.', $rs_attrs->{alias}, $_ } ($source->primary_columns); - return @pcols ? \@pcols : [ 1 ]; -} sub source_bind_attributes { my ($self, $source) = @_; @@ -1910,6 +2144,13 @@ sub select_single { return @row; } +=head2 sql_limit_dialect + +This is an accessor for the default SQL limit dialect used by a particular +storage driver. Can be overriden by supplying an explicit L +to L. For a list of available limit dialects +see L. + =head2 sth =over 4 @@ -1947,7 +2188,8 @@ sub _dbh_columns_info_for { if ($dbh->can('column_info')) { my %result; - eval { + my $caught; + try { my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? 
($1,$2) : (undef,$table); my $sth = $dbh->column_info( undef,$schema, $tab, '%' ); $sth->execute(); @@ -1962,8 +2204,10 @@ sub _dbh_columns_info_for { $result{$col_name} = \%column_info; } + } catch { + $caught = 1; }; - return \%result if !$@ && scalar keys %result; + return \%result if !$caught && scalar keys %result; } my %result; @@ -2011,18 +2255,14 @@ Return the row id of the last insert. =cut sub _dbh_last_insert_id { - # All Storage's need to register their own _dbh_last_insert_id - # the old SQLite-based method was highly inappropriate + my ($self, $dbh, $source, $col) = @_; - my $self = shift; - my $class = ref $self; - $self->throw_exception (<last_insert_id (undef, undef, $source->name, $col) }; -No _dbh_last_insert_id() method found in $class. -Since the method of obtaining the autoincrement id of the last insert -operation varies greatly between different databases, this method must be -individually implemented for every storage class. -EOE + return $id if defined $id; + + my $class = ref $self; + $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed"); } sub last_insert_id { @@ -2062,33 +2302,39 @@ sub _native_data_type { } # Check if placeholders are supported at all -sub _placeholders_supported { +sub _determine_supports_placeholders { my $self = shift; my $dbh = $self->_get_dbh; # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported}) # but it is inaccurate more often than not - eval { + return try { local $dbh->{PrintError} = 0; local $dbh->{RaiseError} = 1; $dbh->do('select ?', {}, 1); + 1; + } + catch { + 0; }; - return $@ ? 0 : 1; } # Check if placeholders bound to non-string types throw exceptions # -sub _typeless_placeholders_supported { +sub _determine_supports_typeless_placeholders { my $self = shift; my $dbh = $self->_get_dbh; - eval { + return try { local $dbh->{PrintError} = 0; local $dbh->{RaiseError} = 1; # this specifically tests a bind that is NOT a string $dbh->do('select 1 where 1 = ?', {}, 1); + 1; + } + catch { + 0; }; - return $@ ? 0 : 1; } =head2 sqlt_type @@ -2136,7 +2382,7 @@ sub is_datatype_numeric { } -=head2 create_ddl_dir (EXPERIMENTAL) +=head2 create_ddl_dir =over 4 @@ -2188,20 +2434,29 @@ hashref like the following { ignore_constraint_names => 0, # ... other options } -Note that this feature is currently EXPERIMENTAL and may not work correctly -across all databases, or fully handle complex relationships. - -WARNING: Please check all SQL files created, before applying them. +WARNING: You are strongly advised to check all SQL files created, before applying +them. =cut sub create_ddl_dir { my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_; - if(!$dir || !-d $dir) { + unless ($dir) { carp "No directory given, using ./\n"; - $dir = "./"; + $dir = './'; + } else { + -d $dir + or + make_path ("$dir") # make_path does not like objects (i.e. Path::Class::Dir) + or + $self->throw_exception( + "Failed to create '$dir': " . ($! || $@ || 'error unknow') + ); } + + $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir); + $databases ||= ['MySQL', 'SQLite', 'PostgreSQL']; $databases = [ $databases ] if(ref($databases) ne 'ARRAY'); @@ -2215,8 +2470,9 @@ sub create_ddl_dir { %{$sqltargs || {}} }; - $self->throw_exception("Can't create a ddl file without SQL::Translator: " . 
$self->_sqlt_version_error) - if !$self->_sqlt_version_ok; + unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy')) { + $self->throw_exception("Can't create a ddl file without " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ); + } my $sqlt = SQL::Translator->new( $sqltargs ); @@ -2358,8 +2614,9 @@ sub deployment_statements { return join('', @rows); } - $self->throw_exception("Can't deploy without either SQL::Translator or a ddl_dir: " . $self->_sqlt_version_error ) - if !$self->_sqlt_version_ok; + unless (DBIx::Class::Optional::Dependencies->req_ok_for ('deploy') ) { + $self->throw_exception("Can't deploy without a ddl_dir or " . DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ); + } # sources needs to be a parser arg, but for simplicty allow at top level # coming in @@ -2399,17 +2656,16 @@ sub deploy { return if($line =~ /^COMMIT/m); return if $line =~ /^\s+$/; # skip whitespace only $self->_query_start($line); - eval { + try { # do a dbh_do cycle here, as we need some error checking in # place (even though we will ignore errors) $self->dbh_do (sub { $_[1]->do($line) }); + } catch { + carp qq{$_ (running "${line}")}; }; - if ($@) { - carp qq{$@ (running "${line}")}; - } $self->_query_end($line); }; - my @statements = $self->deployment_statements($schema, $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } ); + my @statements = $schema->deployment_statements($type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 } ); if (@statements > 1) { foreach my $statement (@statements) { $deploy->( $statement ); @@ -2483,33 +2739,6 @@ sub lag_behind_master { return; } -# SQLT version handling -{ - my $_sqlt_version_ok; # private - my $_sqlt_version_error; # private - - sub _sqlt_version_ok { - if (!defined $_sqlt_version_ok) { - eval "use SQL::Translator $minimum_sqlt_version"; - if ($@) { - $_sqlt_version_ok = 0; - $_sqlt_version_error = $@; - } - else { - $_sqlt_version_ok = 1; - } - } - return $_sqlt_version_ok; - } - - sub _sqlt_version_error { - shift->_sqlt_version_ok unless defined $_sqlt_version_ok; - return $_sqlt_version_error; - } - - sub _sqlt_minimum_version { $minimum_sqlt_version }; -} - =head2 relname_to_table_alias =over 4 @@ -2524,8 +2753,8 @@ queries. This hook is to allow specific L drivers to change the way these aliases are named. -The default behavior is C<"$relname_$join_count" if $join_count > 1>, otherwise -C<"$relname">. +The default behavior is C<< "$relname_$join_count" if $join_count > 1 >>, +otherwise C<"$relname">. =cut @@ -2538,20 +2767,6 @@ sub relname_to_table_alias { return $alias; } -sub DESTROY { - my $self = shift; - - $self->_verify_pid if $self->_dbh; - - # some databases need this to stop spewing warnings - if (my $dbh = $self->_dbh) { - local $@; - eval { $dbh->disconnect }; - } - - $self->_dbh(undef); -} - 1; =head1 USAGE NOTES
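
The two-tiered capability accessors introduced in this patch (_use_X consulted first, falling back to _supports_X via an optional _determine_supports_X probe cached per connection) are meant to be driven from individual storage drivers. Below is a minimal sketch of a hypothetical driver subclass; the package name and the version cut-off in the probe are invented for illustration, while the accessor and helper names come from the patch itself.

# Hypothetical driver subclass exercising the capability plumbing above.
# The package name and the version threshold are examples only.
package DBIx::Class::Storage::DBI::ExampleDB;

use strict;
use warnings;
use base 'DBIx::Class::Storage::DBI';
use mro 'c3';

# Tier 1: answer hardcoded class-wide - never probe for RETURNING support
__PACKAGE__->_use_insert_returning (1);

# Tier 2: _use_typeless_placeholders is left unset, so get_dbms_capability()
# will call this probe once per connection and cache the result in
# _dbh_details (wiped on reconnect, as the server version may have changed)
sub _determine_supports_typeless_placeholders {
  my $self = shift;

  # e.g. gate the capability on the server version collected by _server_info;
  # a reported "9.6.3" normalizes to 9.006003 via sprintf("%d.%03d%03d", ...)
  return ( ($self->_server_info->{normalized_dbms_version} || 0) >= 9.006 )
    ? 1 : 0;
}

1;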
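
Similarly, the reworked connect_info()/_normalize_connect_info() path sorts a mixed trailing hashref into plain DBI attributes, storage options and sql_maker options. The sketch below shows that split under stated assumptions: the DSN and credentials are placeholders, and the 'LimitOffset' dialect name is an assumption (any dialect known to the SQLMaker limit-dialect list would do); the option names themselves are the ones documented in the POD above.

use strict;
use warnings;

# Hypothetical DSN and options, mixed into one trailing hashref
my @connect_info = (
  'dbi:SQLite:dbname=example.db',
  undef,
  undef,
  { AutoCommit => 1 },                                # plain DBI attribute
  {
    on_connect_do => [ 'PRAGMA foreign_keys = ON' ],  # storage option
    quote_char    => q{"},                            # sql_maker option
    limit_dialect => 'LimitOffset',                   # sql_maker option (assumed name)
  },
);

# After $schema->connection(@connect_info), _normalize_connect_info() files
# each piece away as:
#   {arguments}         - dsn, user, pass
#   {attributes}        - { AutoCommit => 1 }, merged into _dbi_connect_info
#   {storage_options}   - on_connect_do et al., set via the storage accessors
#   {sql_maker_options} - quote_char/limit_dialect/name_sep, kept in _sql_maker_opts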