X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=0de26f650fd9cecd1aa6fba3f99b3d53f590829a;hb=4e791ec7b35170f9d9c1dc805d35a1eb4a792cbb;hp=b2a92ef30d26574af385fee74c5b2f1e91fa1d6c;hpb=67e9c1149519bc8205932c135883740076640cce;p=dbsrgits%2FDBIx-Class.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index b2a92ef..0de26f6 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -13,6 +13,12 @@ use DBIx::Class::Storage::DBI::Cursor; use DBIx::Class::Storage::Statistics; use Scalar::Util(); use List::Util(); +use Data::Dumper::Concise(); + +# what version of sqlt do we require if deploy() without a ddl_dir is invoked +# when changing also adjust the corresponding author_require in Makefile.PL +my $minimum_sqlt_version = '0.11002'; + __PACKAGE__->mk_group_accessors('simple' => qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid @@ -35,6 +41,38 @@ __PACKAGE__->mk_group_accessors('inherited' => qw/sql_maker_class/); __PACKAGE__->sql_maker_class('DBIx::Class::SQLAHacks'); +# Each of these methods need _determine_driver called before itself +# in order to function reliably. This is a purely DRY optimization +my @rdbms_specific_methods = qw/ + sqlt_type + build_datetime_parser + datetime_parser_type + + insert + insert_bulk + update + delete + select + select_single +/; + +for my $meth (@rdbms_specific_methods) { + + my $orig = __PACKAGE__->can ($meth) + or next; + + no strict qw/refs/; + no warnings qw/redefine/; + *{__PACKAGE__ ."::$meth"} = sub { + if (not $_[0]->_driver_determined) { + $_[0]->_determine_driver; + goto $_[0]->can($meth); + } + $orig->(@_); + }; +} + + =head1 NAME DBIx::Class::Storage::DBI - DBI storage handler @@ -44,7 +82,14 @@ DBIx::Class::Storage::DBI - DBI storage handler my $schema = MySchema->connect('dbi:SQLite:my.db'); $schema->storage->debug(1); - $schema->dbh_do("DROP TABLE authors"); + + my @stuff = $schema->storage->dbh_do( + sub { + my ($storage, $dbh, @args) = @_; + $dbh->do("DROP TABLE authors"); + }, + @column_list + ); $schema->resultset('Book')->search({ written_on => $schema->storage->datetime_parser(DateTime->now) @@ -556,7 +601,7 @@ sub dbh_do { my $self = shift; my $code = shift; - my $dbh = $self->_dbh; + my $dbh = $self->_get_dbh; return $self->$code($dbh, @_) if $self->{_in_dbh_do} || $self->{transaction_depth}; @@ -567,11 +612,6 @@ sub dbh_do { my $want_array = wantarray; eval { - $self->_verify_pid if $dbh; - if(!$self->_dbh) { - $self->_populate_dbh; - $dbh = $self->_dbh; - } if($want_array) { @result = $self->$code($dbh, @_); @@ -618,8 +658,7 @@ sub txn_do { my $tried = 0; while(1) { eval { - $self->_verify_pid if $self->_dbh; - $self->_populate_dbh if !$self->_dbh; + $self->_get_dbh; $self->txn_begin; if($want_array) { @@ -706,7 +745,6 @@ in MySQL's case disabled entirely. 
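For context, a minimal usage sketch of the dbh_do/txn_do calling convention exercised by the hunks above. The schema class, DSN, tables and column data are illustrative placeholders; only the coderef signature ($storage, $dbh, @args) and the reconnect/retry behaviour are taken from this diff.

  my $schema = MySchema->connect('dbi:SQLite:my.db');   # placeholder connection

  # dbh_do: the coderef receives the storage object and a live $dbh;
  # _get_dbh (now called unconditionally, per the hunk above) re-checks
  # the pid and repopulates the handle before the coderef runs
  my $rows = $schema->storage->dbh_do(
    sub {
      my ($storage, $dbh, @args) = @_;
      $dbh->selectall_arrayref('SELECT id, name FROM authors');
    },
  );

  # txn_do: the coderef runs inside a single transaction; on a lost
  # connection the while(1) loop above reconnects and retries it
  $schema->txn_do(sub {
    $schema->resultset('Book')->create({ title => 'Placeholder Title' });
  });
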
# Storage subclasses should override this sub with_deferred_fk_checks { my ($self, $sub) = @_; - $sub->(); } @@ -809,6 +847,7 @@ sub dbh { # this is the internal "get dbh or connect (don't check)" method sub _get_dbh { my $self = shift; + $self->_verify_pid if $self->_dbh; $self->_populate_dbh unless $self->_dbh; return $self->_dbh; } @@ -834,7 +873,9 @@ sub sql_maker { return $self->_sql_maker; } +# nothing to do by default sub _rebless {} +sub _init {} sub _populate_dbh { my ($self) = @_; @@ -869,18 +910,26 @@ sub _determine_driver { my ($self) = @_; if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) { - my $started_unconnected = 0; + my $started_connected = 0; local $self->{_in_determine_driver} = 1; if (ref($self) eq __PACKAGE__) { my $driver; if ($self->_dbh) { # we are connected $driver = $self->_dbh->{Driver}{Name}; + $started_connected = 1; } else { - # try to use dsn to not require being connected, the driver may still - # force a connection in _rebless to determine version - ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; - $started_unconnected = 1; + # if connect_info is a CODEREF, we have no choice but to connect + if (ref $self->_dbi_connect_info->[0] && + Scalar::Util::reftype($self->_dbi_connect_info->[0]) eq 'CODE') { + $self->_populate_dbh; + $driver = $self->_dbh->{Driver}{Name}; + } + else { + # try to use dsn to not require being connected, the driver may still + # force a connection in _rebless to determine version + ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; + } } my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; @@ -893,8 +942,10 @@ sub _determine_driver { $self->_driver_determined(1); + $self->_init; # run driver-specific initializations + $self->_run_connection_actions - if $started_unconnected && defined $self->_dbh; + if !$started_connected && defined $self->_dbh; } } @@ -952,7 +1003,7 @@ sub _do_query { my @bind = map { [ undef, $_ ] } @do_args; $self->_query_start($sql, @bind); - $self->_dbh->do($sql, $attrs, @do_args); + $self->_get_dbh->do($sql, $attrs, @do_args); $self->_query_end($sql, @bind); } @@ -988,6 +1039,8 @@ sub _connect { $weak_self->throw_exception("DBI Exception: $_[0]"); } else { + # the handler may be invoked by something totally out of + # the scope of DBIC croak ("DBI Exception: $_[0]"); } }; @@ -1107,6 +1160,12 @@ sub txn_begin { sub _dbh_begin_work { my $self = shift; + + # if the user is utilizing txn_do - good for him, otherwise we need to + # ensure that the $dbh is healthy on BEGIN. + # We do this via ->dbh_do instead of ->dbh, so that the ->dbh "ping" + # will be replaced by a failure of begin_work itself (which will be + # then retried on reconnect) if ($self->{_in_dbh_do}) { $self->_dbh->begin_work; } else { @@ -1117,7 +1176,6 @@ sub _dbh_begin_work { sub txn_commit { my $self = shift; if ($self->{transaction_depth} == 1) { - my $dbh = $self->_dbh; $self->debugobj->txn_commit() if ($self->debug); $self->_dbh_commit; @@ -1133,7 +1191,9 @@ sub txn_commit { sub _dbh_commit { my $self = shift; - $self->_dbh->commit; + my $dbh = $self->_dbh + or $self->throw_exception('cannot COMMIT on a disconnected handle'); + $dbh->commit; } sub txn_rollback { @@ -1170,7 +1230,9 @@ sub txn_rollback { sub _dbh_rollback { my $self = shift; - $self->_dbh->rollback; + my $dbh = $self->_dbh + or $self->throw_exception('cannot ROLLBACK on a disconnected handle'); + $dbh->rollback; } # This used to be the top-half of _execute. 
It was split out to make it @@ -1273,12 +1335,6 @@ sub _execute { sub insert { my ($self, $source, $to_insert) = @_; -# redispatch to insert method of storage we reblessed into, if necessary - if (not $self->_driver_determined) { - $self->_determine_driver; - goto $self->can('insert'); - } - my $ident = $source->from; my $bind_attributes = $self->source_bind_attributes($source); @@ -1309,17 +1365,103 @@ sub insert { ## only prepped once. sub insert_bulk { my ($self, $source, $cols, $data) = @_; + my %colvalues; - my $table = $source->from; @colvalues{@$cols} = (0..$#$cols); - my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues); - $self->_determine_driver; + for my $i (0..$#$cols) { + my $first_val = $data->[0][$i]; + next unless ref $first_val eq 'SCALAR'; + + $colvalues{ $cols->[$i] } = $first_val; + } + + # check for bad data and stringify stringifiable objects + my $bad_slice = sub { + my ($msg, $col_idx, $slice_idx) = @_; + $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s", + $msg, + $cols->[$col_idx], + do { + local $Data::Dumper::Maxdepth = 1; # don't dump objects, if any + Data::Dumper::Concise::Dumper({ + map { $cols->[$_] => $data->[$slice_idx][$_] } (0 .. $#$cols) + }), + } + ); + }; - $self->_query_start( $sql, @bind ); + for my $datum_idx (0..$#$data) { + my $datum = $data->[$datum_idx]; + + for my $col_idx (0..$#$cols) { + my $val = $datum->[$col_idx]; + my $sqla_bind = $colvalues{ $cols->[$col_idx] }; + my $is_literal_sql = (ref $sqla_bind) eq 'SCALAR'; + + if ($is_literal_sql) { + if (not ref $val) { + $bad_slice->('bind found where literal SQL expected', $col_idx, $datum_idx); + } + elsif ((my $reftype = ref $val) ne 'SCALAR') { + $bad_slice->("$reftype reference found where literal SQL expected", + $col_idx, $datum_idx); + } + elsif ($$val ne $$sqla_bind){ + $bad_slice->("inconsistent literal SQL value, expecting: '$$sqla_bind'", + $col_idx, $datum_idx); + } + } + elsif (my $reftype = ref $val) { + require overload; + if (overload::Method($val, '""')) { + $datum->[$col_idx] = "".$val; + } + else { + $bad_slice->("$reftype reference found where bind expected", + $col_idx, $datum_idx); + } + } + } + } + + my ($sql, $bind) = $self->_prep_for_execute ( + 'insert', undef, $source, [\%colvalues] + ); + my @bind = @$bind; + + my $empty_bind = 1 if (not @bind) && + (grep { ref $_ eq 'SCALAR' } values %colvalues) == @$cols; + + if ((not @bind) && (not $empty_bind)) { + $self->throw_exception( + 'Cannot insert_bulk without support for placeholders' + ); + } + + $self->_query_start( $sql, ['__BULK__'] ); my $sth = $self->sth($sql); -# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args + my $rv = do { + if ($empty_bind) { + # bind_param_array doesn't work if there are no binds + $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data ); + } + else { +# @bind = map { ref $_ ? ''.$_ : $_ } @bind; # stringify args + $self->_execute_array( $source, $sth, \@bind, $cols, $data ); + } + }; + + $self->_query_end( $sql, ['__BULK__'] ); + + return (wantarray ? ($rv, $sth, @bind) : $rv); +} + +sub _execute_array { + my ($self, $source, $sth, $bind, $cols, $data, @extra) = @_; + + my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0; ## This must be an arrayref, else nothing works! 
my $tuple_status = []; @@ -1330,7 +1472,7 @@ sub insert_bulk { ## Bind the values and execute my $placeholder_index = 1; - foreach my $bound (@bind) { + foreach my $bound (@$bind) { my $attributes = {}; my ($column_name, $data_index) = @$bound; @@ -1345,60 +1487,143 @@ sub insert_bulk { $sth->bind_param_array( $placeholder_index, [@data], $attributes ); $placeholder_index++; } - my $rv = eval { $sth->execute_array({ArrayTupleStatus => $tuple_status}) }; - if (my $err = $@) { + + my $rv = eval { + $self->_dbh_execute_array($sth, $tuple_status, @extra); + }; + my $err = $@ || $sth->errstr; + +# Statement must finish even if there was an exception. + eval { $sth->finish }; + $err = $@ unless $err; + + if ($err) { my $i = 0; ++$i while $i <= $#$tuple_status && !ref $tuple_status->[$i]; - $self->throw_exception($sth->errstr || "Unexpected populate error: $err") + $self->throw_exception("Unexpected populate error: $err") if ($i > $#$tuple_status); - require Data::Dumper; - local $Data::Dumper::Terse = 1; - local $Data::Dumper::Indent = 1; - local $Data::Dumper::Useqq = 1; - local $Data::Dumper::Quotekeys = 0; - $self->throw_exception(sprintf "%s for populate slice:\n%s", - $tuple_status->[$i][1], - Data::Dumper::Dumper( - { map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) } - ), + ($tuple_status->[$i][1] || $err), + Data::Dumper::Concise::Dumper({ + map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols) + }), ); } - $self->throw_exception($sth->errstr) if !$rv; - $self->_query_end( $sql, @bind ); - return (wantarray ? ($rv, $sth, @bind) : $rv); + $guard->commit if $guard; + + return $rv; +} + +sub _dbh_execute_array { + my ($self, $sth, $tuple_status, @extra) = @_; + + return $sth->execute_array({ArrayTupleStatus => $tuple_status}); +} + +sub _dbh_execute_inserts_with_no_binds { + my ($self, $sth, $count) = @_; + + my $guard = $self->txn_scope_guard unless $self->{transaction_depth} != 0; + + eval { + my $dbh = $self->_get_dbh; + local $dbh->{RaiseError} = 1; + local $dbh->{PrintError} = 0; + + $sth->execute foreach 1..$count; + }; + my $exception = $@; + +# Make sure statement is finished even if there was an exception. + eval { $sth->finish }; + $exception = $@ unless $exception; + + $self->throw_exception($exception) if $exception; + + $guard->commit if $guard; + + return $count; } sub update { my ($self, $source, @args) = @_; -# redispatch to update method of storage we reblessed into, if necessary - if (not $self->_driver_determined) { - $self->_determine_driver; - goto $self->can('update'); - } - - my $bind_attributes = $self->source_bind_attributes($source); + my $bind_attrs = $self->source_bind_attributes($source); - return $self->_execute('update' => [], $source, $bind_attributes, @args); + return $self->_execute('update' => [], $source, $bind_attrs, @args); } sub delete { - my $self = shift @_; - my $source = shift @_; - $self->_determine_driver; + my ($self, $source, @args) = @_; + my $bind_attrs = $self->source_bind_attributes($source); - return $self->_execute('delete' => [], $source, $bind_attrs, @_); + return $self->_execute('delete' => [], $source, $bind_attrs, @args); +} + +# Most databases do not allow aliasing of tables in UPDATE/DELETE. Thus +# a condition containing 'me' or other table prefixes will not work +# at all. What this code tries to do (badly) is introspect the condition +# and remove all column qualifiers. If it bails out early (returns undef) +# the calling code should try another approach (e.g. 
a subquery) +sub _strip_cond_qualifiers { + my ($self, $where) = @_; + + my $cond = {}; + + # No-op. No condition, we're updating/deleting everything + return $cond unless $where; + + if (ref $where eq 'ARRAY') { + $cond = [ + map { + my %hash; + foreach my $key (keys %{$_}) { + $key =~ /([^.]+)$/; + $hash{$1} = $_->{$key}; + } + \%hash; + } @$where + ]; + } + elsif (ref $where eq 'HASH') { + if ( (keys %$where) == 1 && ( (keys %{$where})[0] eq '-and' )) { + $cond->{-and} = []; + my @cond = @{$where->{-and}}; + for (my $i = 0; $i < @cond; $i++) { + my $entry = $cond[$i]; + my $hash; + if (ref $entry eq 'HASH') { + $hash = $self->_strip_cond_qualifiers($entry); + } + else { + $entry =~ /([^.]+)$/; + $hash->{$1} = $cond[++$i]; + } + push @{$cond->{-and}}, $hash; + } + } + else { + foreach my $key (keys %$where) { + $key =~ /([^.]+)$/; + $cond->{$1} = $where->{$key}; + } + } + } + else { + return undef; + } + + return $cond; } # We were sent here because the $rs contains a complex search # which will require a subquery to select the correct rows -# (i.e. joined or limited resultsets) +# (i.e. joined or limited resultsets, or non-introspectable conditions) # # Genarating a single PK column subquery is trivial and supported # by all RDBMS. However if we have a multicolumn PK, things get ugly. @@ -1409,16 +1634,7 @@ sub _subq_update_delete { my $rsrc = $rs->result_source; - # we already check this, but double check naively just in case. Should be removed soon - my $sel = $rs->_resolved_attrs->{select}; - $sel = [ $sel ] unless ref $sel eq 'ARRAY'; my @pcols = $rsrc->primary_columns; - if (@$sel != @pcols) { - $self->throw_exception ( - 'Subquery update/delete can not be called on resultsets selecting a' - .' number of columns different than the number of primary keys' - ); - } if (@pcols == 1) { return $self->$op ( @@ -1960,19 +2176,6 @@ sub _subq_count_select { return @pcols ? \@pcols : [ 1 ]; } -# -# Returns an ordered list of column names before they are used -# in a SELECT statement. By default simply returns the list -# passed in. -# -# This may be overridden in a specific storage when there are -# requirements such as moving BLOB columns to the end of the -# SELECT list. -sub _order_select_columns { - #my ($self, $source, $columns) = @_; - return @{$_[2]}; -} - sub source_bind_attributes { my ($self, $source) = @_; @@ -2206,14 +2409,7 @@ Returns the database driver name. =cut sub sqlt_type { - my ($self) = @_; - - if (not $self->_driver_determined) { - $self->_determine_driver; - goto $self->can ('sqlt_type'); - } - - $self->_get_dbh->{Driver}->{Name}; + shift->_get_dbh->{Driver}->{Name}; } =head2 bind_attribute_by_data_type @@ -2330,9 +2526,8 @@ sub create_ddl_dir { %{$sqltargs || {}} }; - $self->throw_exception(q{Can't create a ddl file without SQL::Translator 0.09003: '} - . $self->_check_sqlt_message . q{'}) - if !$self->_check_sqlt_version; + $self->throw_exception("Can't create a ddl file without SQL::Translator: " . $self->_sqlt_version_error) + if !$self->_sqlt_version_ok; my $sqlt = SQL::Translator->new( $sqltargs ); @@ -2474,9 +2669,8 @@ sub deployment_statements { return join('', @rows); } - $self->throw_exception(q{Can't deploy without SQL::Translator 0.09003: '} - . $self->_check_sqlt_message . q{'}) - if !$self->_check_sqlt_version; + $self->throw_exception("Can't deploy without either SQL::Translator or a ddl_dir: " . 
$self->_sqlt_version_error ) + if !$self->_sqlt_version_ok; # sources needs to be a parser arg, but for simplicty allow at top level # coming in @@ -2489,7 +2683,11 @@ sub deployment_statements { parser => 'SQL::Translator::Parser::DBIx::Class', data => $schema, ); - return $tr->translate; + + my $ret = $tr->translate + or $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error); + + return $ret; } sub deploy { @@ -2535,7 +2733,6 @@ Returns the datetime parser class sub datetime_parser { my $self = shift; return $self->{datetime_parser} ||= do { - $self->_populate_dbh unless $self->_dbh; $self->build_datetime_parser(@_); }; } @@ -2558,26 +2755,10 @@ See L sub build_datetime_parser { my $self = shift; my $type = $self->datetime_parser_type(@_); - eval "use ${type}"; - $self->throw_exception("Couldn't load ${type}: $@") if $@; + $self->ensure_class_loaded ($type); return $type; } -{ - my $_check_sqlt_version; # private - my $_check_sqlt_message; # private - sub _check_sqlt_version { - return $_check_sqlt_version if defined $_check_sqlt_version; - eval 'use SQL::Translator "0.09003"'; - $_check_sqlt_message = $@ || ''; - $_check_sqlt_version = !$@; - } - - sub _check_sqlt_message { - _check_sqlt_version if !defined $_check_sqlt_message; - $_check_sqlt_message; - } -} =head2 is_replicating @@ -2604,12 +2785,41 @@ sub lag_behind_master { return; } +# SQLT version handling +{ + my $_sqlt_version_ok; # private + my $_sqlt_version_error; # private + + sub _sqlt_version_ok { + if (!defined $_sqlt_version_ok) { + eval "use SQL::Translator $minimum_sqlt_version"; + if ($@) { + $_sqlt_version_ok = 0; + $_sqlt_version_error = $@; + } + else { + $_sqlt_version_ok = 1; + } + } + return $_sqlt_version_ok; + } + + sub _sqlt_version_error { + shift->_sqlt_version_ok unless defined $_sqlt_version_ok; + return $_sqlt_version_error; + } + + sub _sqlt_minimum_version { $minimum_sqlt_version }; +} + sub DESTROY { my $self = shift; + $self->_verify_pid if $self->_dbh; # some databases need this to stop spewing warnings if (my $dbh = $self->_dbh) { + local $@; eval { $dbh->disconnect }; }
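
The _verify_pid guards added to _get_dbh and DESTROY in this patch exist so that a forked child neither keeps reusing nor tears down the parent's database handle. Below is a hedged sketch of the scenario they cover; the schema class and DSN are placeholders, and only the reconnect-on-pid-mismatch behaviour comes from the patch.

  my $schema = MySchema->connect('dbi:Pg:dbname=test');  # placeholder DSN
  $schema->storage->ensure_connected;                    # parent now holds a live $dbh

  if (my $pid = fork) {
    waitpid $pid, 0;           # parent continues with its own handle
  }
  else {
    # child: the inherited handle was opened under the parent's pid, so the
    # next _get_dbh call (reached here via dbh_do) notices the mismatch and
    # opens a fresh connection instead of sharing the parent's socket; the
    # child's DESTROY likewise no longer disconnects the parent's handle
    $schema->storage->dbh_do(sub { $_[1]->do('SELECT 1') });
    exit 0;
  }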