X-Git-Url: http://git.shadowcat.co.uk/gitweb/gitweb.cgi?a=blobdiff_plain;f=lib%2FDBIx%2FClass%2FStorage%2FDBI.pm;h=32be00fa0c7a5776862802bfda326d1a600956ad;hb=94942394c18872ea5a6c309f62e15ff89127e98d;hp=3d9a200f41c55348cb4c4a88b5ca33699089e283;hpb=c0db3bf569e3673a237b058bbf703f2aabc0b36b;p=dbsrgits%2FDBIx-Class-Historic.git diff --git a/lib/DBIx/Class/Storage/DBI.pm b/lib/DBIx/Class/Storage/DBI.pm index 3d9a200..32be00f 100644 --- a/lib/DBIx/Class/Storage/DBI.pm +++ b/lib/DBIx/Class/Storage/DBI.pm @@ -1,10 +1,12 @@ package DBIx::Class::Storage::DBI; # -*- mode: cperl; cperl-indent-level: 2 -*- +use strict; +use warnings; + use base 'DBIx::Class::Storage'; +use mro 'c3'; -use strict; -use warnings; use Carp::Clan qw/^DBIx::Class/; use DBI; use DBIx::Class::Storage::DBI::Cursor; @@ -13,8 +15,8 @@ use Scalar::Util(); use List::Util(); __PACKAGE__->mk_group_accessors('simple' => - qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts - _conn_pid _conn_tid transaction_depth _dbh_autocommit savepoints/ + qw/_connect_info _dbi_connect_info _dbh _sql_maker _sql_maker_opts _conn_pid + _conn_tid transaction_depth _dbh_autocommit _driver_determined savepoints/ ); # the values for these accessors are picked out (and deleted) from @@ -90,8 +92,8 @@ recognized by DBIx::Class: =item * -A single code reference which returns a connected -L optionally followed by +A single code reference which returns a connected +L optionally followed by L recognized by DBIx::Class: @@ -110,7 +112,7 @@ mixed together: %extra_attributes, }]; -This is particularly useful for L based applications, allowing the +This is particularly useful for L based applications, allowing the following config (L style): @@ -129,7 +131,7 @@ Please note that the L docs recommend that you always explicitly set C to either I<0> or I<1>. L further recommends that it be set to I<1>, and that you perform transactions via our L method. L will set it -to I<1> if you do not do explicitly set it to zero. This is the default +to I<1> if you do not do explicitly set it to zero. This is the default for most DBDs. See L for details. =head3 DBIx::Class specific connection attributes @@ -268,7 +270,7 @@ storage object. If set to a true value, this option will disable the caching of statement handles via L. -=item limit_dialect +=item limit_dialect Sets the limit dialect. This is useful for JDBC-bridge among others where the remote SQL-dialect cannot be determined by the name of the @@ -276,7 +278,7 @@ driver alone. See also L. =item quote_char -Specifies what characters to use to quote table and column names. If +Specifies what characters to use to quote table and column names. If you use this you will want to specify L as well. C expects either a single character, in which case is it @@ -288,8 +290,8 @@ SQL Server you should use C<< quote_char => [qw/[ ]/] >>. =item name_sep -This only needs to be used in conjunction with C, and is used to -specify the charecter that seperates elements (schemas, tables, columns) from +This only needs to be used in conjunction with C, and is used to +specify the charecter that seperates elements (schemas, tables, columns) from each other. In most cases this is simply a C<.>. 
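For example, a connection that uses MySQL-style backtick quoting could be
set up as below (an illustrative sketch only; the schema class, DSN and
credentials are placeholders):

  My::Schema->connect(
    'dbi:mysql:database=test', 'username', 'password',
    { AutoCommit => 1 },
    { quote_char => '`', name_sep => '.' },
  );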
The consequences of not supplying this value is that L @@ -669,12 +671,20 @@ sub connected { $self->_verify_pid; return 0 if !$self->_dbh; } - return ($dbh->FETCH('Active') && $dbh->ping); + return ($dbh->FETCH('Active') && $self->_ping); } return 0; } +sub _ping { + my $self = shift; + + my $dbh = $self->_dbh or return 0; + + return $dbh->ping; +} + # handle pid changes correctly # NOTE: assumes $self->_dbh is a valid $dbh sub _verify_pid { @@ -753,21 +763,27 @@ sub _populate_dbh { sub _determine_driver { my ($self) = @_; - if (ref $self eq 'DBIx::Class::Storage::DBI') { - my $driver; + if (not $self->_driver_determined) { + if (ref($self) eq __PACKAGE__) { + my $driver; - if ($self->_dbh) { # we are connected - $driver = $self->_dbh->{Driver}{Name}; - } else { - # try to use dsn to not require being connected, the driver may still - # force a connection in _rebless to determine version - ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; - } + if ($self->_dbh) { # we are connected + $driver = $self->_dbh->{Driver}{Name}; + } else { + # try to use dsn to not require being connected, the driver may still + # force a connection in _rebless to determine version + ($driver) = $self->_dbi_connect_info->[0] =~ /dbi:([^:]+):/i; + } - if ($self->load_optional_class("DBIx::Class::Storage::DBI::${driver}")) { - bless $self, "DBIx::Class::Storage::DBI::${driver}"; - $self->_rebless(); + my $storage_class = "DBIx::Class::Storage::DBI::${driver}"; + if ($self->load_optional_class($storage_class)) { + mro::set_mro($storage_class, 'c3'); + bless $self, $storage_class; + $self->_rebless(); + } } + + $self->_driver_determined(1); } } @@ -891,11 +907,11 @@ sub svp_begin { $self->throw_exception ("Your Storage implementation doesn't support savepoints") unless $self->can('_svp_begin'); - + push @{ $self->{savepoints} }, $name; $self->debugobj->svp_begin($name) if $self->debug; - + return $self->_svp_begin($name); } @@ -955,7 +971,7 @@ sub svp_rollback { } $self->debugobj->svp_rollback($name) if $self->debug; - + return $self->_svp_rollback($name); } @@ -1093,7 +1109,7 @@ sub _dbh_execute { my $sth = $self->sth($sql,$op); - my $placeholder_index = 1; + my $placeholder_index = 1; foreach my $bound (@$bind) { my $attributes = {}; @@ -1130,12 +1146,17 @@ sub _execute { sub insert { my ($self, $source, $to_insert) = @_; +# redispatch to insert method of storage we reblessed into, if necessary + if (not $self->_driver_determined) { + $self->_determine_driver; + goto $self->can('insert'); + } + my $ident = $source->from; my $bind_attributes = $self->source_bind_attributes($source); my $updated_cols = {}; - $self->ensure_connected; foreach my $col ( $source->columns ) { if ( !defined $to_insert->{$col} ) { my $col_info = $source->column_info($col); @@ -1152,7 +1173,7 @@ sub insert { } ## Still not quite perfect, and EXPERIMENTAL -## Currently it is assumed that all values passed will be "normal", i.e. not +## Currently it is assumed that all values passed will be "normal", i.e. not ## scalar refs, or at least, all the same type as the first set, the statement is ## only prepped once. 
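## For illustration: the usual caller of this bulk path is resultset
## populate() invoked in void context. A minimal sketch (the 'Artist'
## source and its columns are made up for the example):
##
##   $schema->resultset('Artist')->populate([
##     [ qw/name rank/ ],
##     [ 'Dead Salmon',     1 ],
##     [ 'Random Boy Band', 2 ],
##   ]);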
sub insert_bulk { @@ -1161,7 +1182,7 @@ sub insert_bulk { my $table = $source->from; @colvalues{@$cols} = (0..$#$cols); my ($sql, @bind) = $self->sql_maker->insert($table, \%colvalues); - + $self->_query_start( $sql, @bind ); my $sth = $self->sth($sql); @@ -1174,7 +1195,7 @@ sub insert_bulk { my $bind_attributes = $self->source_bind_attributes($source); ## Bind the values and execute - my $placeholder_index = 1; + my $placeholder_index = 1; foreach my $bound (@bind) { @@ -1222,7 +1243,7 @@ sub update { my $self = shift @_; my $source = shift @_; my $bind_attributes = $self->source_bind_attributes($source); - + return $self->_execute('update' => [], $source, $bind_attributes, @_); } @@ -1230,9 +1251,9 @@ sub update { sub delete { my $self = shift @_; my $source = shift @_; - + my $bind_attrs = $self->source_bind_attributes($source); - + return $self->_execute('delete' => [], $source, $bind_attrs, @_); } @@ -1331,10 +1352,10 @@ sub _select { my $self = shift; # localization is neccessary as - # 1) there is no infrastructure to pass this around (easy to do, but will wait) + # 1) there is no infrastructure to pass this around before SQLA2 # 2) _select_args sets it and _prep_for_execute consumes it my $sql_maker = $self->sql_maker; - local $sql_maker->{for}; + local $sql_maker->{_dbic_rs_attrs}; return $self->_execute($self->_select_args(@_)); } @@ -1343,10 +1364,10 @@ sub _select_args_to_query { my $self = shift; # localization is neccessary as - # 1) there is no infrastructure to pass this around (easy to do, but will wait) + # 1) there is no infrastructure to pass this around before SQLA2 # 2) _select_args sets it and _prep_for_execute consumes it my $sql_maker = $self->sql_maker; - local $sql_maker->{for}; + local $sql_maker->{_dbic_rs_attrs}; # my ($op, $bind, $ident, $bind_attrs, $select, $cond, $order, $rows, $offset) # = $self->_select_args($ident, $select, $cond, $attrs); @@ -1366,8 +1387,19 @@ sub _select_args_to_query { sub _select_args { my ($self, $ident, $select, $where, $attrs) = @_; + my ($alias2source, $rs_alias) = $self->_resolve_ident_sources ($ident); + my $sql_maker = $self->sql_maker; - my $alias2source = $self->_resolve_ident_sources ($ident); + $sql_maker->{_dbic_rs_attrs} = { + %$attrs, + select => $select, + from => $ident, + where => $where, + $rs_alias + ? ( _source_handle => $alias2source->{$rs_alias}->handle ) + : () + , + }; # calculate bind_attrs before possible $ident mangling my $bind_attrs = {}; @@ -1378,8 +1410,21 @@ sub _select_args { my $fqcn = join ('.', $alias, $col); $bind_attrs->{$fqcn} = $bindtypes->{$col} if $bindtypes->{$col}; - # so that unqualified searches can be bound too - $bind_attrs->{$col} = $bind_attrs->{$fqcn} if $alias eq 'me'; + # Unqialified column names are nice, but at the same time can be + # rather ambiguous. What we do here is basically go along with + # the loop, adding an unqualified column slot to $bind_attrs, + # alongside the fully qualified name. As soon as we encounter + # another column by that name (which would imply another table) + # we unset the unqualified slot and never add any info to it + # to avoid erroneous type binding. 
If this happens the users + # only choice will be to fully qualify his column name + + if (exists $bind_attrs->{$col}) { + $bind_attrs->{$col} = {}; + } + else { + $bind_attrs->{$col} = $bind_attrs->{$fqcn}; + } } } @@ -1407,7 +1452,7 @@ sub _select_args { ( $attrs->{rows} && keys %{$attrs->{collapse}} ) || ( $attrs->{group_by} && @{$attrs->{group_by}} && - $attrs->{prefetch_select} && @{$attrs->{prefetch_select}} ) + $attrs->{_prefetch_select} && @{$attrs->{_prefetch_select}} ) ) { ($ident, $select, $where, $attrs) = $self->_adjust_select_args_for_complex_prefetch ($ident, $select, $where, $attrs); @@ -1428,42 +1473,46 @@ sub _select_args { my $order = { map { $attrs->{$_} ? ( $_ => $attrs->{$_} ) : () } - (qw/order_by group_by having _virtual_order_by/ ) + (qw/order_by group_by having/ ) }; - $sql_maker->{for} = delete $attrs->{for}; - return ('select', $attrs->{bind}, $ident, $bind_attrs, $select, $where, $order, @limit); } +# +# This is the code producing joined subqueries like: +# SELECT me.*, other.* FROM ( SELECT me.* FROM ... ) JOIN other ON ... +# sub _adjust_select_args_for_complex_prefetch { my ($self, $from, $select, $where, $attrs) = @_; + $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute') + if (ref $from ne 'ARRAY'); + # copies for mangling $from = [ @$from ]; $select = [ @$select ]; $attrs = { %$attrs }; - $self->throw_exception ('Complex prefetches are not supported on resultsets with a custom from attribute') - if (ref $from ne 'ARRAY'); - # separate attributes my $sub_attrs = { %$attrs }; delete $attrs->{$_} for qw/where bind rows offset group_by having/; - delete $sub_attrs->{$_} for qw/for collapse prefetch_select _collapse_order_by select as/; + delete $sub_attrs->{$_} for qw/for collapse _prefetch_select _collapse_order_by select as/; - my $alias = $attrs->{alias}; + my $select_root_alias = $attrs->{alias}; + my $sql_maker = $self->sql_maker; - # create subquery select list - loop only over primary columns + # create subquery select list - consider only stuff *not* brought in by the prefetch my $sub_select = []; - for my $i (0 .. @{$attrs->{select}} - @{$attrs->{prefetch_select}} - 1) { + my $sub_group_by; + for my $i (0 .. @{$attrs->{select}} - @{$attrs->{_prefetch_select}} - 1) { my $sel = $attrs->{select}[$i]; # alias any functions to the dbic-side 'as' label # adjust the outer select accordingly if (ref $sel eq 'HASH' && !$sel->{-select}) { $sel = { -select => $sel, -as => $attrs->{as}[$i] }; - $select->[$i] = join ('.', $attrs->{alias}, $attrs->{as}[$i]); + $select->[$i] = join ('.', $attrs->{alias}, ($attrs->{as}[$i] || "select_$i") ); } push @$sub_select, $sel; @@ -1478,29 +1527,15 @@ sub _adjust_select_args_for_complex_prefetch { ]; } - # mangle {from} - my $select_root = shift @$from; - my @outer_from = @$from; + # mangle {from}, keep in mind that $from is "headless" from here on + my $join_root = shift @$from; my %inner_joins; my %join_info = map { $_->[0]{-alias} => $_->[0] } (@$from); - # in complex search_related chains $alias may *not* be 'me' - # so always include it in the inner join, and also shift away - # from the outer stack, so that the two datasets actually do - # meet - if ($select_root->{-alias} ne $alias) { - $inner_joins{$alias} = 1; - - while (@outer_from && $outer_from[0][0]{-alias} ne $alias) { - shift @outer_from; - } - if (! 
@outer_from) { - $self->throw_exception ("Unable to find '$alias' in the {from} stack, something is wrong"); - } - - shift @outer_from; # the new subquery will represent this alias, so get rid of it - } + # in complex search_related chains $select_root_alias may *not* be + # 'me' so always include it in the inner join + $inner_joins{$select_root_alias} = 1 if ($join_root->{-alias} ne $select_root_alias); # decide which parts of the join will remain on the inside @@ -1520,8 +1555,9 @@ sub _adjust_select_args_for_complex_prefetch { # It may not be very efficient, but it's a reasonable stop-gap { # produce stuff unquoted, so it can be scanned - my $sql_maker = $self->sql_maker; local $sql_maker->{quote_char}; + my $sep = $self->_sql_maker_opts->{name_sep} || '.'; + $sep = "\Q$sep\E"; my @order_by = (map { ref $_ ? $_->[0] : $_ } @@ -1529,6 +1565,7 @@ sub _adjust_select_args_for_complex_prefetch { ); my $where_sql = $sql_maker->where ($where); + my $select_sql = $sql_maker->_recurse_fields ($sub_select); # sort needed joins for my $alias (keys %join_info) { @@ -1536,8 +1573,8 @@ sub _adjust_select_args_for_complex_prefetch { # any table alias found on a column name in where or order_by # gets included in %inner_joins # Also any parent joins that are needed to reach this particular alias - for my $piece ($where_sql, @order_by ) { - if ($piece =~ /\b$alias\./) { + for my $piece ($select_sql, $where_sql, @order_by ) { + if ($piece =~ /\b $alias $sep/x) { $inner_joins{$alias} = 1; } } @@ -1560,21 +1597,22 @@ sub _adjust_select_args_for_complex_prefetch { } # construct the inner $from for the subquery - my $inner_from = [ $select_root ]; + my $inner_from = [ $join_root ]; for my $j (@$from) { push @$inner_from, $j if $inner_joins{$j->[0]{-alias}}; } # if a multi-type join was needed in the subquery ("multi" is indicated by # presence in {collapse}) - add a group_by to simulate the collapse in the subq - - for my $alias (keys %inner_joins) { - - # the dot comes from some weirdness in collapse - # remove after the rewrite - if ($attrs->{collapse}{".$alias"}) { - $sub_attrs->{group_by} ||= $sub_select; - last; + unless ($sub_attrs->{group_by}) { + for my $alias (keys %inner_joins) { + + # the dot comes from some weirdness in collapse + # remove after the rewrite + if ($attrs->{collapse}{".$alias"}) { + $sub_attrs->{group_by} ||= $sub_select; + last; + } } } @@ -1585,12 +1623,47 @@ sub _adjust_select_args_for_complex_prefetch { $where, $sub_attrs ); + my $subq_joinspec = { + -alias => $select_root_alias, + -source_handle => $join_root->{-source_handle}, + $select_root_alias => $subq, + }; - # put it in the new {from} - unshift @outer_from, { $alias => $subq }; + # Generate a new from (really just replace the join slot with the subquery) + # Before we would start the outer chain from the subquery itself (i.e. + # SELECT ... FROM (SELECT ... ) alias JOIN ..., but this turned out to be + # a bad idea for search_related, as the root of the chain was effectively + # lost (i.e. $artist_rs->search_related ('cds'... 
) would result in alias + # of 'cds', which would prevent from doing things like order_by artist.*) + # See t/prefetch/via_search_related.t for a better idea + my @outer_from; + if ($join_root->{-alias} eq $select_root_alias) { # just swap the root part and we're done + @outer_from = ( + $subq_joinspec, + @$from, + ) + } + else { # this is trickier + @outer_from = ($join_root); + + for my $j (@$from) { + if ($j->[0]{-alias} eq $select_root_alias) { + push @outer_from, [ + $subq_joinspec, + @{$j}[1 .. $#$j], + ]; + } + else { + push @outer_from, $j; + } + } + } # This is totally horrific - the $where ends up in both the inner and outer query - # Unfortunately not much can be done until SQLA2 introspection arrives + # Unfortunately not much can be done until SQLA2 introspection arrives, and even + # then if where conditions apply to the *right* side of the prefetch, you may have + # to both filter the inner select (e.g. to apply a limit) and then have to re-filter + # the outer select to exclude joins you didin't want in the first place # # OTOH it can be seen as a plus: (notes that this query would make a DBA cry ;) return (\@outer_from, $select, $where, $attrs); @@ -1600,12 +1673,14 @@ sub _resolve_ident_sources { my ($self, $ident) = @_; my $alias2source = {}; + my $rs_alias; # the reason this is so contrived is that $ident may be a {from} # structure, specifying multiple tables to join if ( Scalar::Util::blessed($ident) && $ident->isa("DBIx::Class::ResultSource") ) { # this is compat mode for insert/update/delete which do not deal with aliases $alias2source->{me} = $ident; + $rs_alias = 'me'; } elsif (ref $ident eq 'ARRAY') { @@ -1613,6 +1688,7 @@ sub _resolve_ident_sources { my $tabinfo; if (ref $_ eq 'HASH') { $tabinfo = $_; + $rs_alias = $tabinfo->{-alias}; } if (ref $_ eq 'ARRAY' and ref $_->[0] eq 'HASH') { $tabinfo = $_->[0]; @@ -1623,7 +1699,57 @@ sub _resolve_ident_sources { } } - return $alias2source; + return ($alias2source, $rs_alias); +} + +# Takes $ident, \@column_names +# +# returns { $column_name => \%column_info, ... } +# also note: this adds -result_source => $rsrc to the column info +# +# usage: +# my $col_sources = $self->_resolve_column_info($ident, @column_names); +sub _resolve_column_info { + my ($self, $ident, $colnames) = @_; + my ($alias2src, $root_alias) = $self->_resolve_ident_sources($ident); + + my $sep = $self->_sql_maker_opts->{name_sep} || '.'; + $sep = "\Q$sep\E"; + + my (%return, %seen_cols); + + # compile a global list of column names, to be able to properly + # disambiguate unqualified column names (if at all possible) + for my $alias (keys %$alias2src) { + my $rsrc = $alias2src->{$alias}; + for my $colname ($rsrc->columns) { + push @{$seen_cols{$colname}}, $alias; + } + } + + COLUMN: + foreach my $col (@$colnames) { + my ($alias, $colname) = $col =~ m/^ (?: ([^$sep]+) $sep)? (.+) $/x; + + unless ($alias) { + # see if the column was seen exactly once (so we know which rsrc it came from) + if ($seen_cols{$colname} and @{$seen_cols{$colname}} == 1) { + $alias = $seen_cols{$colname}[0]; + } + else { + next COLUMN; + } + } + + my $rsrc = $alias2src->{$alias}; + $return{$col} = $rsrc && { + %{$rsrc->column_info($colname)}, + -result_source => $rsrc, + -source_alias => $alias, + }; + } + + return \%return; } # Returns a counting SELECT for a simple count @@ -1905,13 +2031,13 @@ By default, C<\%sqlt_args> will have { add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1 } -merged with the hash passed in. 
To disable any of those features, pass in a +merged with the hash passed in. To disable any of those features, pass in a hashref like the following { ignore_constraint_names => 0, # ... other options } -Note that this feature is currently EXPERIMENTAL and may not work correctly +Note that this feature is currently EXPERIMENTAL and may not work correctly across all databases, or fully handle complex relationships. WARNING: Please check all SQL files created, before applying them. @@ -1932,7 +2058,7 @@ sub create_ddl_dir { $version ||= $schema_version; $sqltargs = { - add_drop_table => 1, + add_drop_table => 1, ignore_constraint_names => 1, ignore_index_names => 1, %{$sqltargs || {}} @@ -1972,7 +2098,7 @@ sub create_ddl_dir { } print $file $output; close($file); - + next unless ($preversion); require SQL::Translator::Diff; @@ -1988,7 +2114,7 @@ sub create_ddl_dir { carp("Overwriting existing diff file - $difffile"); unlink($difffile); } - + my $source_schema; { my $t = SQL::Translator->new($sqltargs); @@ -2007,7 +2133,7 @@ sub create_ddl_dir { unless ( $source_schema->name ); } - # The "new" style of producers have sane normalization and can support + # The "new" style of producers have sane normalization and can support # diffing a SQL file against a DBIC->SQLT schema. Old style ones don't # And we have to diff parsed SQL against parsed SQL. my $dest_schema = $sqlt_schema; @@ -2028,12 +2154,12 @@ sub create_ddl_dir { $dest_schema->name( $filename ) unless $dest_schema->name; } - + my $diff = SQL::Translator::Diff::schema_diff($source_schema, $db, $dest_schema, $db, $sqltargs ); - if(!open $file, ">$difffile") { + if(!open $file, ">$difffile") { $self->throw_exception("Can't write to $difffile ($!)"); next; } @@ -2077,7 +2203,7 @@ sub deployment_statements { if(-f $filename) { my $file; - open($file, "<$filename") + open($file, "<$filename") or $self->throw_exception("Can't open $filename ($!)"); my @rows = <$file>; close($file); @@ -2092,7 +2218,7 @@ sub deployment_statements { eval qq{use SQL::Translator::Producer::${type}}; $self->throw_exception($@) if $@; - # sources needs to be a parser arg, but for simplicty allow at top level + # sources needs to be a parser arg, but for simplicty allow at top level # coming in $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources} if exists $sqltargs->{sources}; @@ -2197,7 +2323,7 @@ returned by databases that don't support replication. sub is_replicating { return; - + } =head2 lag_behind_master