X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=45afefcfd2e7158b64bf46eb70be92fc92ea74bb;hb=5efba7fcc89e113c60d78fa246a0217c405ea1fc;hp=e9fab0b222f1d827c4db47b5563e04c24edaa1d9;hpb=a9bac98fc664ce08e085b230a3a8d79deee44727;p=dbsrgits%2FDBIx-Class.git

diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm
index e9fab0b..45afefc 100644
--- a/lib/DBIx/Class/Storage/DBI.pm
+++ b/lib/DBIx/Class/Storage/DBI.pm
@@ -12,9 +12,11 @@ use DBIx::Class::Exception;
 use Scalar::Util qw/refaddr weaken reftype blessed/;
 use List::Util qw/first/;
 use Sub::Name 'subname';
+use Context::Preserve 'preserve_context';
 use Try::Tiny;
 use overload ();
 use Data::Compare (); # no imports!!! guard against insane architecture
+use DBI::Const::GetInfoType (); # no import of retarded global hash
 use namespace::clean;
 
 # default cursor class, overridable in connect_info attributes
@@ -34,6 +36,7 @@ __PACKAGE__->sql_name_sep('.');
 __PACKAGE__->mk_group_accessors('simple' => qw/
   _connect_info _dbi_connect_info _dbic_connect_attributes _driver_determined
   _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
+  _perform_autoinc_retrieval _autoinc_supplied_for_op
 /);
 
 # the values for these accessors are picked out (and deleted) from
@@ -62,8 +65,12 @@ __PACKAGE__->mk_group_accessors('simple' => @storage_options);
 my @capabilities = (qw/
   insert_returning
   insert_returning_bound
+
+  multicolumn_in
+
   placeholders
   typeless_placeholders
+
   join_optimizer
 /);
 __PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
@@ -226,6 +233,7 @@ sub DESTROY {
   my $self = shift;
 
   # some databases spew warnings on implicit disconnect
+  $self->_verify_pid;
   local $SIG{__WARN__} = sub {};
   $self->_dbh(undef);
 
@@ -774,37 +782,28 @@ Example:
 
 sub dbh_do {
   my $self = shift;
-  my $code = shift;
-
-  my $dbh = $self->_get_dbh;
+  my $run_target = shift;
 
-  return $self->$code($dbh, @_)
-    if ( $self->{_in_do_block} || $self->{transaction_depth} );
-
-  local $self->{_in_do_block} = 1;
+  # short circuit when we know there is no need for a runner
+  #
+  # FIXME - asumption may be wrong
+  # the rationale for the txn_depth check is that if this block is a part
+  # of a larger transaction, everything up to that point is screwed anyway
+  return $self->$run_target($self->_get_dbh, @_)
+    if $self->{_in_do_block} or $self->transaction_depth;
 
-  # take a ref instead of a copy, to preserve coderef @_ aliasing semantics
   my $args = \@_;
 
-  try {
-    $self->$code ($dbh, @$args);
-  } catch {
-    $self->throw_exception($_) if $self->connected;
-
-    # We were not connected - reconnect and retry, but let any
-    #  exception fall right through this time
-    carp "Retrying dbh_do($code) after catching disconnected exception: $_"
-      if $ENV{DBIC_STORAGE_RETRY_DEBUG};
-
-    $self->_populate_dbh;
-    $self->$code($self->_dbh, @$args);
-  };
+  DBIx::Class::Storage::BlockRunner->new(
+    storage => $self,
+    run_code => sub { $self->$run_target ($self->_get_dbh, @$args ) },
+    wrap_txn => 0,
+    retry_handler => sub { ! ( $_[0]->retried_count or $_[0]->storage->connected ) },
+  )->run;
 }
 
 sub txn_do {
-  # connects or reconnects on pid change, necessary to grab correct txn_depth
-  $_[0]->_get_dbh;
-  local $_[0]->{_in_do_block} = 1;
+  $_[0]->_get_dbh; # connects or reconnects on pid change, necessary to grab correct txn_depth
 
   shift->next::method(@_);
 }
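
For context on the rewrite above: the new retry_handler expresses the same
policy the removed try/catch implemented by hand -- retry at most once, and
only when the failure looks like a lost connection. A minimal sketch of that
predicate, assuming only the retried_count and storage accessors visible in
the hunk (the surrounding sub is hypothetical, not part of the patch):

    # $runner stands in for the DBIx::Class::Storage::BlockRunner
    # instance handed to retry_handler
    sub should_retry {
      my ($runner) = @_;

      # never retry more than once
      return 0 if $runner->retried_count;

      # retry only if the storage no longer believes it is connected;
      # a genuine query error on a live connection is rethrown instead
      return $runner->storage->connected ? 0 : 1;
    }
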
@@ -1108,12 +1107,18 @@ sub _server_info {
 }
 
 sub _get_server_version {
-  shift->_dbh_get_info(18);
+  shift->_dbh_get_info('SQL_DBMS_VER');
 }
 
 sub _dbh_get_info {
   my ($self, $info) = @_;
 
+  if ($info =~ /[^0-9]/) {
+    $info = $DBI::Const::GetInfoType::GetInfoType{$info};
+    $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType")
+      unless defined $info;
+  }
+
   return try { $self->_get_dbh->get_info($info) } || undef;
 }
 
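
The symbolic-name support in _dbh_get_info relies on the %GetInfoType hash
shipped with DBI::Const::GetInfoType; the fully qualified lookup mirrors the
import-free "use ... ()" added at the top of this diff. A small illustration
(the $storage handle is assumed, and _dbh_get_info is a private method, so
this is demonstration only):

    use DBI::Const::GetInfoType ();   # no import, as in the patch

    # 'SQL_DBMS_VER' maps to the standard ODBC info code 18, making
    # the following two calls equivalent after this change:
    my $code = $DBI::Const::GetInfoType::GetInfoType{SQL_DBMS_VER};  # 18

    $storage->_dbh_get_info(18);               # numeric, old style
    $storage->_dbh_get_info('SQL_DBMS_VER');   # symbolic, new style
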
@@ -1220,9 +1225,11 @@ sub _do_query {
     my $attrs = shift @do_args;
     my @bind = map { [ undef, $_ ] } @do_args;
 
-    $self->_query_start($sql, \@bind);
-    $self->_get_dbh->do($sql, $attrs, @do_args);
-    $self->_query_end($sql, \@bind);
+    $self->dbh_do(sub {
+      $_[0]->_query_start($sql, \@bind);
+      $_[1]->do($sql, $attrs, @do_args);
+      $_[0]->_query_end($sql, \@bind);
+    });
   }
 
   return $self;
@@ -1236,10 +1243,7 @@ sub _connect {
 
   my ($old_connect_via, $dbh);
 
-  if ($INC{'Apache/DBI.pm'} && $ENV{MOD_PERL}) {
-    $old_connect_via = $DBI::connect_via;
-    $DBI::connect_via = 'connect';
-  }
+  local $DBI::connect_via = 'connect' if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL};
 
   try {
     if(ref $info[0] eq 'CODE') {
@@ -1301,9 +1305,6 @@
   }
   catch {
     $self->throw_exception("DBI Connection failed: $_")
-  }
-  finally {
-    $DBI::connect_via = $old_connect_via if $old_connect_via;
   };
 
   $self->_dbh_autocommit($dbh->{AutoCommit});
@@ -1668,10 +1669,17 @@ sub insert {
   # they can be fused once again with the final return
   $to_insert = { %$to_insert, %$prefetched_values };
 
+  # FIXME - we seem to assume undef values as non-supplied. This is wrong.
+  # Investigate what does it take to s/defined/exists/
   my $col_infos = $source->columns_info;
   my %pcols = map { $_ => 1 } $source->primary_columns;
-  my %retrieve_cols;
+  my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);
   for my $col ($source->columns) {
+    if ($col_infos->{$col}{is_auto_increment}) {
+      $autoinc_supplied ||= 1 if defined $to_insert->{$col};
+      $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
+    }
+
     # nothing to retrieve when explicit values are supplied
     next if (defined $to_insert->{$col} and ! (
       ref $to_insert->{$col} eq 'SCALAR'
@@ -1687,6 +1695,9 @@
     );
   };
 
+  local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
+  local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;
+
   my ($sqla_opts, @ir_container);
   if (%retrieve_cols and $self->_use_insert_returning) {
     $sqla_opts->{returning_container} = \@ir_container
@@ -1765,7 +1776,13 @@
     }
   }
 
-  my $colinfo_cache = {}; # since we will run _resolve_bindattrs on the same $source a lot
+  my $colinfos = $source->columns_info($cols);
+
+  local $self->{_autoinc_supplied_for_op} =
+    (first { $_->{is_auto_increment} } values %$colinfos)
+      ? 1
+      : 0
+  ;
 
   # get a slice type index based on first row of data
   # a "column" in this context may refer to more than one bind value
@@ -1802,7 +1819,7 @@
 
     # normalization of user supplied stuff
     my $resolved_bind = $self->_resolve_bindattrs(
-      $source, \@bind, $colinfo_cache,
+      $source, \@bind, $colinfos,
    );
 
     # store value-less (attrs only) bind info - we will be comparing all
@@ -1918,7 +1935,7 @@
           map { $_->[0] }
             @{$self->_resolve_bindattrs(
-              $source, [ @{$$val}[1 .. $#$$val] ], $colinfo_cache,
+              $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
             )}
         ],
       )) {
@@ -1956,7 +1973,7 @@
 
   $guard->commit;
 
-  return (wantarray ? ($rv, $sth, @$proto_bind) : $rv);
+  return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
 }
 
 # execute_for_fetch is capable of returning data just fine (it means it
@@ -2093,103 +2110,6 @@ sub delete {
   shift->_execute('delete', @_);
 }
 
-# We were sent here because the $rs contains a complex search
-# which will require a subquery to select the correct rows
-# (i.e. joined or limited resultsets, or non-introspectable conditions)
-#
-# Generating a single PK column subquery is trivial and supported
-# by all RDBMS. However if we have a multicolumn PK, things get ugly.
-# Look at _multipk_update_delete()
-sub _subq_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-
-  # quick check if we got a sane rs on our hands
-  my @pcols = $rsrc->_pri_cols;
-
-  my $sel = $rs->_resolved_attrs->{select};
-  $sel = [ $sel ] unless ref $sel eq 'ARRAY';
-
-  if (
-    join ("\x00", map { join '.', $rs->{attrs}{alias}, $_ } sort @pcols)
-      ne
-    join ("\x00", sort @$sel )
-  ) {
-    $self->throw_exception (
-      '_subq_update_delete can not be called on resultsets selecting columns other than the primary keys'
-    );
-  }
-
-  if (@pcols == 1) {
-    return $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      { $pcols[0] => { -in => $rs->as_query } },
-    );
-  }
-
-  else {
-    return $self->_multipk_update_delete (@_);
-  }
-}
-
-# ANSI SQL does not provide a reliable way to perform a multicol-PK
-# resultset update/delete involving subqueries. So by default resort
-# to simple (and inefficient) delete_all style per-row opearations,
-# while allowing specific storages to override this with a faster
-# implementation.
-#
-sub _multipk_update_delete {
-  return shift->_per_row_update_delete (@_);
-}
-
-# This is the default loop used to delete/update rows for multi PK
-# resultsets, and used by mysql exclusively (because it can't do anything
-# else).
-#
-# We do not use $row->$op style queries, because resultset update/delete
-# is not expected to cascade (this is what delete_all/update_all is for).
-#
-# There should be no race conditions as the entire operation is rolled
-# in a transaction.
-#
-sub _per_row_update_delete {
-  my $self = shift;
-  my ($rs, $op, $values) = @_;
-
-  my $rsrc = $rs->result_source;
-  my @pcols = $rsrc->_pri_cols;
-
-  my $guard = $self->txn_scope_guard;
-
-  # emulate the return value of $sth->execute for non-selects
-  my $row_cnt = '0E0';
-
-  my $subrs_cur = $rs->cursor;
-  my @all_pk = $subrs_cur->all;
-  for my $pks ( @all_pk) {
-
-    my $cond;
-    for my $i (0.. $#pcols) {
-      $cond->{$pcols[$i]} = $pks->[$i];
-    }
-
-    $self->$op (
-      $rsrc,
-      $op eq 'update' ? $values : (),
-      $cond,
-    );
-
-    $row_cnt++;
-  }
-
-  $guard->commit;
-
-  return $row_cnt;
-}
-
 sub _select {
   my $self = shift;
   $self->_execute($self->_select_args(@_));
@@ -2198,6 +2118,10 @@
 sub _select_args_to_query {
   my $self = shift;
 
+  $self->throw_exception(
+    "Unable to generate limited query representation with 'software_limit' enabled"
+  ) if ($_[3]->{software_limit} and ($_[3]->{offset} or $_[3]->{rows}) );
+
   # my ($op, $ident, $select, $cond, $rs_attrs, $rows, $offset)
   #  = $self->_select_args($ident, $select, $cond, $attrs);
   my ($op, $ident, @args) =
@@ -2251,8 +2175,8 @@ sub _select_args {
   # see if we need to tear the prefetch apart otherwise delegate the limiting to the
   # storage, unless software limit was requested
   if (
-    #limited has_many
-    ( $attrs->{rows} && keys %{$attrs->{collapse}} )
+    # limited collapsing has_many
+    ( $attrs->{rows} && $attrs->{collapse} )
      ||
     # grouped prefetch (to satisfy group_by == select)
     ( $attrs->{group_by}
@@ -2273,7 +2197,15 @@
   }
 
   # try to simplify the joinmap further (prune unreferenced type-single joins)
-  $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+  if (
+    ref $ident
+      and
+    reftype $ident eq 'ARRAY'
+      and
+    @$ident != 1
+  ) {
+    $ident = $self->_prune_unused_joins ($ident, $select, $where, $attrs);
+  }
 
 ###
   # This would be the point to deflate anything found in $where
@@ -2830,18 +2762,12 @@ sub deployment_statements {
     data => $schema,
   );
 
-  my @ret;
-  if (wantarray) {
-    @ret = $tr->translate;
-  }
-  else {
-    $ret[0] = $tr->translate;
-  }
-
-  $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
-    unless (@ret && defined $ret[0]);
-
-  return wantarray ? @ret : $ret[0];
+  return preserve_context {
+    $tr->translate
+  } after => sub {
+    $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
+      unless defined $_[0];
+  };
 }
 
 # FIXME deploy() currently does not accurately report sql errors
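
The deployment_statements rewrite above leans on Context::Preserve (imported
in the first hunk of this diff): preserve_context runs the block in whatever
context the caller used, then passes the captured result to the "after" hook
for validation. A standalone sketch of the same pattern, with a hypothetical
$produce coderef standing in for $tr->translate:

    use Context::Preserve 'preserve_context';

    sub checked_statements {
      my ($produce) = @_;

      # list or scalar context propagates through to $produce->();
      # the after hook sees the captured return value(s) in @_
      return preserve_context { $produce->() }
        after => sub {
          die "Unable to produce deployment statements\n"
            unless defined $_[0];
        };
    }
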
@@ -3032,6 +2958,13 @@
     |national\s*character\s*varying))\z/xi);
 }
 
+# Determine if a data_type is some type of a binary type
+sub _is_binary_type {
+  my ($self, $data_type) = @_;
+  $data_type && ($self->_is_binary_lob_type($data_type)
+    || $data_type =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i);
+}
+
 1;
 
 =head1 USAGE NOTES
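
A closing note on the new _is_binary_type predicate in the final hunk: beyond
the LOB types delegated to _is_binary_lob_type, its unanchored case-insensitive
pattern accepts the common SQL binary type names. A few assumed sample inputs,
shown for illustration rather than taken from the patch:

    # matches: 'binary', 'varbinary', 'bit', 'bit varying',
    #          'graphic', 'vargraphic' (any capitalization)
    # no match on its own: 'text', 'integer' ('blob' is still caught
    #          earlier, via the _is_binary_lob_type fallback)
    for my $t (qw( binary varbinary bit graphic ), 'bit varying', 'text') {
      print "$t => ",
        ( $t =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i
          ? "binary\n" : "not binary\n" );
    }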