X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=99a895e8e92527ac2d2babde5336a08ac202718a;hb=c6ec79000b160e7491d9ab9d95d6e69c473b0862;hp=7908e3fae4a26d1f400ea5bdbac6fdf0fdb18bad;hpb=f033dcbe3fd1c8e70a26151555fe1cf6fc55a37f;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index 7908e3f..99a895e 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -9,11 +9,14 @@ use mro 'c3';
 
 use DBIx::Class::Carp;
 use Scalar::Util qw/refaddr weaken reftype blessed/;
-use List::Util qw/first/;
 use Context::Preserve 'preserve_context';
-use Try::Tiny;
 use SQL::Abstract qw(is_plain_value is_literal_value);
-use DBIx::Class::_Util qw(quote_sub perlstring serialize detected_reinvoked_destructor);
+use DBIx::Class::_Util qw(
+  quote_sub perlstring serialize dump_value
+  dbic_internal_try dbic_internal_catch
+  detected_reinvoked_destructor scope_guard
+  mkdir_p
+);
 use namespace::clean;
 
 # default cursor class, overridable in connect_info attributes
@@ -221,19 +224,33 @@ sub new {
     weaken (
       $seek_and_destroy{ refaddr($_[0]) } = $_[0]
     );
+
+    # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage
+    # collected before leaving this scope. Depending on the code above, this
+    # may very well be just a preventive measure guarding future modifications
+    undef;
   }
 
   END {
-    local $?; # just in case the DBI destructor changes it somehow
 
-    # destroy just the object if not native to this process
-    $_->_verify_pid for (grep
-      { defined $_ }
-      values %seek_and_destroy
-    );
+    if(
+      ! DBIx::Class::_ENV_::BROKEN_FORK
+        and
+      my @instances = grep { defined $_ } values %seek_and_destroy
+    ) {
+      local $?; # just in case the DBI destructor changes it somehow
+
+      # disarm the handle if not native to this process (see comment on top)
+      $_->_verify_pid for @instances;
+    }
+
+    # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage
+    # collected before leaving this scope. Depending on the code above, this
+    # may very well be just a preventive measure guarding future modifications
+    undef;
   }
 
-  sub CLONE {
+  sub DBIx::Class::__DBI_Storage_iThreads_handler__::CLONE {
     # As per DBI's recommendation, DBIC disconnects all handles as
     # soon as possible (DBIC will reconnect only on demand from within
     # the thread)
@@ -242,13 +259,16 @@ sub new {
 
     for (@instances) {
       $_->_dbh(undef);
-
-      $_->transaction_depth(0);
-      $_->savepoints([]);
+      $_->disconnect;
 
       # properly renumber existing refs
      $_->_arm_global_destructor
     }
+
+    # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage
+    # collected before leaving this scope. Depending on the code above, this
+    # may very well be just a preventive measure guarding future modifications
+    undef;
   }
 }
 
@@ -256,15 +276,18 @@ sub DESTROY {
   return if &detected_reinvoked_destructor;
 
   $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
+
+  # some databases spew warnings on implicit disconnect
+  return unless defined $_[0]->_dbh;
 
   local $SIG{__WARN__} = sub {};
   $_[0]->_dbh(undef);
 
+  # not calling ->disconnect here - we are being destroyed - nothing to reset
-  # this op is necessary, since the very last perl runtime statement
-  # triggers a global destruction shootout, and the $SIG localization
-  # may very well be destroyed before perl actually gets to do the
-  # $dbh undef
-  1;
+  # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage
+  # collected before leaving this scope. Depending on the code above, this
+  # may very well be just a preventive measure guarding future modifications
+  undef;
 }
 
 # handle pid changes correctly - do not destroy parent's connection
@@ -275,11 +298,13 @@ sub _verify_pid {
   if( defined $pid and $pid != $$ and my $dbh = $_[0]->_dbh ) {
     $dbh->{InactiveDestroy} = 1;
     $_[0]->_dbh(undef);
-    $_[0]->transaction_depth(0);
-    $_[0]->savepoints([]);
+    $_[0]->disconnect;
   }
 
-  return;
+  # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage
+  # collected before leaving this scope. Depending on the code above, this
+  # may very well be just a preventive measure guarding future modifications
+  undef;
 }
 
 =head2 connect_info
@@ -870,21 +895,41 @@ database is not in C mode.
 
 =cut
 
 sub disconnect {
+  my $self = shift;
+
+  # this physical disconnect below might very well throw
+  # in order to unambiguously reset the state - do the cleanup in guard
+
+  my $g = scope_guard {
-  if( my $dbh = $_[0]->_dbh ) {
+    defined( $self->_dbh )
+      and dbic_internal_try { $self->_dbh->disconnect };
+
+    $self->_dbh(undef);
+    $self->_dbh_details({});
+    $self->transaction_depth(undef);
+    $self->_dbh_autocommit(undef);
+    $self->savepoints([]);
+
+    # FIXME - this needs reenabling with the proper "no reset on same DSN" check
+    #$self->_sql_maker(undef); # this may also end up being different
+  };
 
-    $_[0]->_do_connection_actions(disconnect_call_ => $_) for (
-      ( $_[0]->on_disconnect_call || () ),
-      $_[0]->_parse_connect_do ('on_disconnect_do')
+  if( $self->_dbh ) {
+
+    $self->_do_connection_actions(disconnect_call_ => $_) for (
+      ( $self->on_disconnect_call || () ),
+      $self->_parse_connect_do ('on_disconnect_do')
     );
 
     # stops the "implicit rollback on disconnect" warning
-    $_[0]->_exec_txn_rollback unless $_[0]->_dbh_autocommit;
-
-    %{ $dbh->{CachedKids} } = ();
-    $dbh->disconnect;
-    $_[0]->_dbh(undef);
+    $self->_exec_txn_rollback unless $self->_dbh_autocommit;
   }
+
+  # Dummy NEXTSTATE ensuring the all temporaries on the stack are garbage
+  # collected before leaving this scope. Depending on the code above, this
+  # may very well be just a preventive measure guarding future modifications
+  undef;
 }
 
 =head2 with_deferred_fk_checks
@@ -936,7 +981,15 @@ sub connected {
 sub _seems_connected {
   $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;
 
-  ($_[0]->_dbh || return 0)->FETCH('Active');
+  $_[0]->_dbh
+    and
+  $_[0]->_dbh->FETCH('Active')
+    and
+  return 1;
+
+  # explicitly reset all state
+  $_[0]->disconnect;
+  return 0;
 }
 
 sub _ping {
@@ -1038,12 +1091,9 @@ sub _init {}
 
 sub _populate_dbh {
 
-  $_[0]->_dbh(undef); # in case ->connected failed we might get sent here
-
-  $_[0]->_dbh_details({}); # reset everything we know
-
-  # FIXME - this needs reenabling with the proper "no reset on same DSN" check
-  #$_[0]->_sql_maker(undef); # this may also end up being different
+  # reset internal states
+  # also in case ->connected failed we might get sent here
+  $_[0]->disconnect;
 
   $_[0]->_dbh($_[0]->_connect);
 
@@ -1053,7 +1103,7 @@ sub _populate_dbh {
 
   # Always set the transaction depth on connect, since
   # there is no transaction in progress by definition
-  $_[0]->{transaction_depth} = $_[0]->_dbh_autocommit ? 0 : 1;
+  $_[0]->transaction_depth( $_[0]->_dbh_autocommit ? 0 : 1 );
 
   $_[0]->_run_connection_actions unless $_[0]->{_in_determine_driver};
 
@@ -1121,9 +1171,9 @@ sub _server_info {
 
     my $info = {};
 
-    my $server_version = try {
+    my $server_version = dbic_internal_try {
       $self->_get_server_version
-    } catch {
+    } dbic_internal_catch {
       # driver determination *may* use this codepath
       # in which case we must rethrow
       $self->throw_exception($_) if $self->{_in_determine_driver};
@@ -1182,7 +1232,7 @@ sub _describe_connection {
   my $self = shift;
 
   my $drv;
-  try {
+  dbic_internal_try {
     $drv = $self->_extract_driver_from_connect_info;
     $self->ensure_connected;
   };
@@ -1196,7 +1246,7 @@ sub _describe_connection {
     DBIC_DRIVER => ref $self,
     $drv ? (
       DBD => $drv,
-      DBD_VER => try { $drv->VERSION },
+      DBD_VER => dbic_internal_try { $drv->VERSION },
     ) : (),
   };
 
@@ -1237,7 +1287,7 @@ sub _describe_connection {
   ) {
     # some drivers barf on things they do not know about instead
     # of returning undef
-    my $v = try { $self->_dbh_get_info($inf) };
+    my $v = dbic_internal_try { $self->_dbh_get_info($inf) };
     next unless defined $v;
 
     #my $key = sprintf( '%s(%s)', $inf, $DBI::Const::GetInfoType::GetInfoType{$inf} );
@@ -1253,7 +1303,9 @@ sub _determine_driver {
 
   if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
     my $started_connected = 0;
-    local $self->{_in_determine_driver} = 1;
+
+    local $self->{_in_determine_driver} = 1
+      unless $self->{_in_determine_driver};
 
     if (ref($self) eq __PACKAGE__) {
       my $driver;
@@ -1268,7 +1320,17 @@ sub _determine_driver {
       if ($driver) {
         my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
         if ($self->load_optional_class($storage_class)) {
-          mro::set_mro($storage_class, 'c3');
+
+          no strict 'refs';
+          mro::set_mro($storage_class, 'c3') if
+            (
+              ${"${storage_class}::__INITIAL_MRO_UPON_DBIC_LOAD__"}
+                ||= mro::get_mro($storage_class)
+            )
+              ne
+            'c3'
+          ;
+
           bless $self, $storage_class;
           $self->_rebless();
         }
@@ -1325,7 +1387,16 @@ sub _extract_driver_from_connect_info {
     # try to use dsn to not require being connected, the driver may still
     # force a connection later in _rebless to determine version
     # (dsn may not be supplied at all if all we do is make a mock-schema)
-    ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:([^:]+):/i;
+    #
+    # Use the same regex as the one used by DBI itself (even if the use of
+    # \w is odd given unicode):
+    # https://metacpan.org/source/TIMB/DBI-1.634/DBI.pm#L621
+    #
+    # DO NOT use https://metacpan.org/source/TIMB/DBI-1.634/DBI.pm#L559-566
+    # as there is a long-standing precedent of not loading DBI.pm until the
+    # very moment we are actually connecting
+    #
+    ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:(\w*)/i;
     $drv ||= $ENV{DBI_DRIVER};
   }
 
@@ -1367,19 +1438,17 @@ sub _get_rdbms_name { shift->_dbh_get_info('SQL_DBMS_NAME') }
 sub _warn_undetermined_driver {
   my ($self, $msg) = @_;
 
-  require Data::Dumper::Concise;
-
   carp_once ($msg . ' While we will attempt to continue anyway, the results '
   . 'are likely to be underwhelming. Please upgrade DBIC, and if this message '
   . "does not go away, file a bugreport including the following info:\n"
-  . Data::Dumper::Concise::Dumper($self->_describe_connection)
+  . dump_value $self->_describe_connection
   );
 }
 
 sub _do_connection_actions {
   my ($self, $method_prefix, $call, @args) = @_;
 
-  try {
+  dbic_internal_try {
     if (not ref($call)) {
       my $method = $method_prefix . $call;
       $self->$method(@args);
     }
@@ -1399,12 +1468,13 @@ sub _do_connection_actions {
       $self->throw_exception (sprintf ("Don't know how to process conection actions of type '%s'", ref($call)) );
     }
   }
-  catch {
+  dbic_internal_catch {
     if ( $method_prefix =~ /^connect/ ) {
       # this is an on_connect cycle - we can't just throw while leaving
       # a handle in an undefined state in our storage object
       # kill it with fire and rethrow
       $self->_dbh(undef);
+      $self->disconnect; # the $dbh is gone, but we still need to reset the rest
       $self->throw_exception( $_[0] );
     }
     else {
@@ -1502,7 +1572,7 @@ sub _connect {
     }, '__DBIC__DBH__ERROR__HANDLER__';
   };
 
-  try {
+  dbic_internal_try {
     if(ref $info->[0] eq 'CODE') {
       $dbh = $info->[0]->();
     }
@@ -1548,7 +1618,7 @@ sub _connect {
       $dbh_error_handler_installer->($self, $dbh);
     }
   }
-  catch {
+  dbic_internal_catch {
     $self->throw_exception("DBI Connection failed: $_")
   };
 
@@ -1618,7 +1688,9 @@ sub _exec_txn_commit {
 sub txn_rollback {
   my $self = shift;
 
-  $self->throw_exception("Unable to txn_rollback() on a disconnected storage")
+  # do a minimal connectivity check due to weird shit like
+  # https://rt.cpan.org/Public/Bug/Display.html?id=62370
+  $self->throw_exception("lost connection to storage")
     unless $self->_seems_connected;
 
   # esoteric case for folks using external $dbh handles
@@ -1681,10 +1753,8 @@ sub _gen_sql_bind {
       and
    $op eq 'select'
      and
-    first {
-      length ref $_->[1]
-        and
-      blessed($_->[1])
+    grep {
+      defined blessed($_->[1])
        and
      $_->[1]->isa('DateTime')
    } @$bind
@@ -1751,7 +1821,7 @@ sub _format_for_trace {
   map {
     defined( $_ && $_->[1] )
-      ? qq{'$_->[1]'}
+      ? sprintf( "'%s'", "$_->[1]" ) # because overload
      : q{NULL}
   } @{$_[1] || []};
 }
 
@@ -1928,19 +1998,43 @@ sub insert {
   # they can be fused once again with the final return
   $to_insert = { %$to_insert, %$prefetched_values };
 
-  # FIXME - we seem to assume undef values as non-supplied. This is wrong.
-  # Investigate what does it take to s/defined/exists/
   my %pcols = map { $_ => 1 } $source->primary_columns;
+
   my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
+
   for my $col ($source->columns) {
+
+    # first autoinc wins - this is why ->columns() in-order iteration is important
+    #
+    # FIXME - there ought to be a sanity-check for multiple is_auto_increment settings
+    # or something...
+    #
     if ($col_infos->{$col}{is_auto_increment}) {
+
+      # FIXME - we seem to assume undef values as non-supplied.
+      # This is wrong.
+      # Investigate what does it take to s/defined/exists/
+      # ( fails t/cdbi/copy.t among other things )
       $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+
       $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
     }
 
    # nothing to retrieve when explicit values are supplied
    next if (
-      defined $to_insert->{$col} and ! is_literal_value($to_insert->{$col})
+      # FIXME - we seem to assume undef values as non-supplied.
+      # This is wrong.
+      # Investigate what does it take to s/defined/exists/
+      # ( fails t/cdbi/copy.t among other things )
+      defined $to_insert->{$col}
+        and
+      (
+        # not a ref - cheaper to check before a call to is_literal_value()
+        ! length ref $to_insert->{$col}
+          or
+        # not a literal we *MAY* need to pull out ( see check below )
+        ! is_literal_value( $to_insert->{$col} )
+      )
    );
 
    # the 'scalar keys' is a trick to preserve the ->columns declaration order
@@ -1951,6 +2045,35 @@ sub insert {
     );
   };
 
+  # corner case of a non-supplied PK which is *not* declared as autoinc
+  if (
+    ! $autoinc_supplied
+      and
+    ! defined $retrieve_autoinc_col
+      and
+    # FIXME - first come-first serve, suboptimal...
+    ($retrieve_autoinc_col) = ( grep
+      {
+        $pcols{$_}
+          and
+        ! $col_infos->{$_}{retrieve_on_insert}
+          and
+        ! defined $col_infos->{$_}{is_auto_increment}
+      }
+      sort
+        { $retrieve_cols{$a} <=> $retrieve_cols{$b} }
+        keys %retrieve_cols
+    )
+  ) {
+    carp_unique(
+      "Missing value for primary key column '$retrieve_autoinc_col' on "
+    . "@{[ $source->source_name ]} - perhaps you forgot to set its "
+    . "'is_auto_increment' attribute during add_columns()? Treating "
+    . "'$retrieve_autoinc_col' implicitly as an autoinc, and attempting "
+    . 'value retrieval'
+    );
+  }
+
   local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
   local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
 
@@ -1968,12 +2091,30 @@ sub insert {
 
   my %returned_cols = %$to_insert;
   if (my $retlist = $sqla_opts->{returning}) { # if IR is supported - we will get everything in one set
-    @ir_container = try {
-      local $SIG{__WARN__} = sub {};
-      my @r = $sth->fetchrow_array;
-      $sth->finish;
-      @r;
-    } unless @ir_container;
+
+    unless( @ir_container ) {
+      dbic_internal_try {
+
+        # FIXME - need to investigate why Caelum silenced this in 4d4dc518
+        local $SIG{__WARN__} = sub {};
+
+        @ir_container = $sth->fetchrow_array;
+        $sth->finish;
+
+      } dbic_internal_catch {
+        # Evict the $sth from the cache in case we got here, since the finish()
+        # is crucial, at least on older Firebirds, possibly on other engines too
+        #
+        # It would be too complex to make this a proper subclass override,
+        # and besides we already take the try{} penalty, adding a catch that
+        # triggers infrequently is a no-brainer
+        #
+        if( my $kids = $self->_dbh->{CachedKids} ) {
+          $kids->{$_} == $sth and delete $kids->{$_}
+            for keys %$kids
+        }
+      };
+    }
 
     @returned_cols{@$retlist} = @ir_container if @ir_container;
   }
@@ -2127,13 +2268,12 @@ sub _insert_bulk {
         $msg,
         $cols->[$c_idx],
         do {
-          require Data::Dumper::Concise;
          local $Data::Dumper::Maxdepth = 5;
-          Data::Dumper::Concise::Dumper ({
+          dump_value {
            map { $cols->[$_] => $data->[$r_idx][$_] } 0..$#$cols
-          }),
+          };
        }
      );
    };
@@ -2297,13 +2437,13 @@ sub _dbh_execute_for_fetch {
   my $tuple_status = [];
 
   my ($rv, $err);
-  try {
+  dbic_internal_try {
     $rv = $sth->execute_for_fetch(
       $fetch_tuple,
       $tuple_status,
     );
   }
-  catch {
+  dbic_internal_catch {
     $err = shift;
   };
 
@@ -2316,10 +2456,10 @@ sub _dbh_execute_for_fetch {
   );
 
   # Statement must finish even if there was an exception.
-  try {
+  dbic_internal_try {
     $sth->finish
   }
-  catch {
+  dbic_internal_catch {
     $err = shift unless defined $err;
   };
 
@@ -2330,10 +2470,9 @@ sub _dbh_execute_for_fetch {
   $self->throw_exception("Unexpected populate error: $err")
     if ($i > $#$tuple_status);
 
-  require Data::Dumper::Concise;
   $self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s",
     ($tuple_status->[$i][1] || $err),
-    Data::Dumper::Concise::Dumper( { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } ),
+    dump_value { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) },
   );
 }
@@ -2344,22 +2483,22 @@ sub _dbh_execute_inserts_with_no_binds {
   my ($self, $sth, $count) = @_;
 
   my $err;
-  try {
+  dbic_internal_try {
     my $dbh = $self->_get_dbh;
     local $dbh->{RaiseError} = 1;
     local $dbh->{PrintError} = 0;
 
     $sth->execute foreach 1..$count;
   }
-  catch {
+  dbic_internal_catch {
     $err = shift;
   };
 
   # Make sure statement is finished even if there was an exception.
-  try {
+  dbic_internal_try {
     $sth->finish
   }
-  catch {
+  dbic_internal_catch {
     $err = shift unless defined $err;
   };
 
@@ -2427,21 +2566,9 @@ sub _select_args {
     where => $where,
   };
 
-  # Sanity check the attributes (SQLMaker does it too, but
-  # in case of a software_limit we'll never reach there)
-  if (defined $attrs->{offset}) {
-    $self->throw_exception('A supplied offset attribute must be a non-negative integer')
-      if ( $attrs->{offset} =~ /[^0-9]/ or $attrs->{offset} < 0 );
-  }
-
-  if (defined $attrs->{rows}) {
-    $self->throw_exception("The rows attribute must be a positive integer if present")
-      if ( $attrs->{rows} =~ /[^0-9]/ or $attrs->{rows} <= 0 );
-  }
-  elsif ($attrs->{offset}) {
-    # MySQL actually recommends this approach. I cringe.
-    $attrs->{rows} = $sql_maker->__max_int;
-  }
+  # MySQL actually recommends this approach. I cringe.
+  $attrs->{rows} ||= $sql_maker->__max_int
+    if $attrs->{offset};
 
   # see if we will need to tear the prefetch apart to satisfy group_by == select
   # this is *extremely tricky* to get right, I am still not sure I did
@@ -2467,7 +2594,7 @@ sub _select_args {
       and
     @{$attrs->{group_by}}
       and
-    my $grp_aliases = try { # try{} because $attrs->{from} may be unreadable
+    my $grp_aliases = dbic_internal_try { # internal_try{} because $attrs->{from} may be unreadable
       $self->_resolve_aliastypes_from_select_args({ from => $attrs->{from}, group_by => $attrs->{group_by} })
     }
   ) {
@@ -2584,7 +2711,7 @@ sub _dbh_columns_info_for {
   my %result;
 
   if (! DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE and $dbh->can('column_info')) {
-    try {
+    dbic_internal_try {
      my ($schema,$tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1,$2) : (undef,$table);
      my $sth = $dbh->column_info( undef,$schema, $tab, '%' );
      $sth->execute();
@@ -2599,7 +2726,7 @@ sub _dbh_columns_info_for {
 
        $result{$col_name} = \%column_info;
      }
-    } catch {
+    } dbic_internal_catch {
      %result = ();
    };
 
@@ -2688,7 +2815,7 @@ Return the row id of the last insert.
 =cut
 
 sub _dbh_last_insert_id {
   my ($self, $dbh, $source, $col) = @_;
 
-  my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
+  my $id = dbic_internal_try { $dbh->last_insert_id (undef, undef, $source->name, $col) };
 
   return $id if defined $id;
@@ -2739,15 +2866,15 @@ sub _determine_supports_placeholders {
 
   # some drivers provide a $dbh attribute (e.g. Sybase and $dbh->{syb_dynamic_supported})
   # but it is inaccurate more often than not
-  return try {
+  ( dbic_internal_try {
     local $dbh->{PrintError} = 0;
     local $dbh->{RaiseError} = 1;
     $dbh->do('select ?', {}, 1);
     1;
-  }
-  catch {
-    0;
-  };
+  } )
+    ? 1
+    : 0
+  ;
 }
 
 # Check if placeholders bound to non-string types throw exceptions
@@ -2756,16 +2883,16 @@ sub _determine_supports_typeless_placeholders {
   my $self = shift;
   my $dbh = $self->_get_dbh;
 
-  return try {
+  ( dbic_internal_try {
     local $dbh->{PrintError} = 0;
     local $dbh->{RaiseError} = 1;
     # this specifically tests a bind that is NOT a string
     $dbh->do('select 1 where 1 = ?', {}, 1);
     1;
-  }
-  catch {
-    0;
-  };
+  } )
+    ? 1
+    : 0
+  ;
 }
 
 =head2 sqlt_type
@@ -2875,20 +3002,18 @@ them.
 
 sub create_ddl_dir {
   my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;
 
-  unless ($dir) {
+  require DBIx::Class::Optional::Dependencies;
+  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')) {
+    $self->throw_exception("Can't create a ddl file without $missing");
+  }
+
+  if (!$dir) {
     carp "No directory given, using ./\n";
     $dir = './';
-  } else {
-      -d $dir
-        or
-      (require File::Path and File::Path::mkpath (["$dir"])) # mkpath does not like objects (i.e. Path::Class::Dir)
-        or
-      $self->throw_exception(
-        "Failed to create '$dir': " . ($! || $@ || 'error unknown')
-      );
   }
-
-  $self->throw_exception ("Directory '$dir' does not exist\n") unless(-d $dir);
+  else {
+    mkdir_p( $dir ) unless -d $dir;
+  }
 
   $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
   $databases = [ $databases ] if(ref($databases) ne 'ARRAY');
@@ -2904,10 +3029,6 @@ sub create_ddl_dir {
     %{$sqltargs || {}}
   };
 
-  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')) {
-    $self->throw_exception("Can't create a ddl file without $missing");
-  }
-
   my $sqlt = SQL::Translator->new( $sqltargs );
   $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
 
@@ -3046,6 +3167,11 @@ See L for a list of values for C<$sqlt_args>.
 
 sub deployment_statements {
   my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;
+
+  $self->throw_exception(
+    'Calling deployment_statements() in void context makes no sense'
+  ) unless defined wantarray;
+
   $type ||= $self->sqlt_type;
   $version ||= $schema->schema_version || '1.x';
   $dir ||= './';
@@ -3061,6 +3187,7 @@ sub deployment_statements {
     return join('', @rows);
   }
 
+  require DBIx::Class::Optional::Dependencies;
   if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ) {
     $self->throw_exception("Can't deploy without a pregenerated 'ddl_dir' directory or $missing");
   }
@@ -3101,11 +3228,11 @@ sub deploy {
     return if($line =~ /^COMMIT/m);
     return if $line =~ /^\s+$/; # skip whitespace only
     $self->_query_start($line);
-    try {
+    dbic_internal_try {
      # do a dbh_do cycle here, as we need some error checking in
      # place (even though we will ignore errors)
      $self->dbh_do (sub { $_[1]->do($line) });
-    } catch {
+    } dbic_internal_catch {
      carp qq{$_ (running "${line}")};
    };
    $self->_query_end($line);